source
stringlengths
3
92
c
stringlengths
26
2.25M
sgelqf.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgelqf.c, normal z -> s, Fri Sep 28 17:38:01 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gelqf
 *
 * Computes tile LQ factorization of a real m-by-n matrix A.
 * The factorization has the form
 * \f[ A = L \times Q \f],
 * where L is a lower trapezoidal with positive diagonal and Q is a matrix with
 * orthonormal rows.
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in,out] pA
 *          On entry, pointer to the m-by-n matrix A.
 *          On exit, the elements on and below the diagonal of the array
 *          contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower
 *          triangular if M <= N); the elements above the diagonal represent
 *          the orthogonal matrix Q as a product of elementary reflectors,
 *          stored by tiles.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[out] T
 *          On exit, auxiliary factorization data, required by plasma_sgelqs
 *          to solve the system of equations.
 *          Matrix of T is allocated inside this function and needs to be
 *          destroyed by plasma_desc_destroy.
 *          NOTE: on the quick-return path (min(m,n) == 0) and on error
 *          returns, *T is not allocated and must not be destroyed.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_sgelqf
 * @sa plasma_cgelqf
 * @sa plasma_dgelqf
 * @sa plasma_sgelqs
 *
 ******************************************************************************/
int plasma_sgelqf(int m, int n,
                  float *pA, int lda,
                  plasma_desc_t *T)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gelqf(plasma, PlasmaRealFloat, m, n);

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    plasma_enum_t householder_mode = plasma->householder_mode;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // Fix: release the tile matrix A; returning here used to leak it.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaRealFloat);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // Fix: release T and A; returning here used to leak both.
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);

        // Call the tile async function.
        plasma_omp_sgelqf(A, *T, work, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_gelqf
 *
 * Computes the tile LQ factorization of a matrix.
 * Non-blocking tile version of plasma_sgelqf().
 * May return before the computation is finished.
 * Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in,out] A
 *          Descriptor of matrix A.
 *          A is stored in the tile layout.
 *
 * @param[out] T
 *          Descriptor of matrix T.
 *          On exit, auxiliary factorization data, required by plasma_sgelqs to
 *          solve the system of equations.
 *
 * @param[in] work
 *          Workspace for the auxiliary arrays needed by some coreblas kernels.
 *          For LQ factorization, contains preallocated space for tau and work
 *          arrays. Allocated by the plasma_workspace_create function.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_sgelqf
 * @sa plasma_omp_cgelqf
 * @sa plasma_omp_dgelqf
 * @sa plasma_omp_sgelqs
 *
 ******************************************************************************/
void plasma_omp_sgelqf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request before anything that reports through them.
    // Fix: the previous code passed a NULL sequence/request straight into
    // plasma_request_fail(), which dereferences them.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_psgelqf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_psgelqf(A, T, work, sequence, request);
    }
}
GB_unop__floor_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__floor_fp32_fp32)
// op(A') function: GB (_unop_tran__floor_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = floorf (aij)

// The macros below are consumed textually by GB_unop_transpose.c (included
// at the bottom of this file), so their exact definitions are part of the
// kernel's behavior.

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = floorf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = floorf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FLOOR || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__floor_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index; private to each thread per the OpenMP loop construct below
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/full case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = floorf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = floorf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__floor_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated from the macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
taskdep2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two tasks with depend clause to ensure execution order, no data races.
   i is shared for two tasks based on implicit data-sharing attribute rules. */

#include <assert.h>

int main()
{
  int i = 0;

#pragma omp parallel
#pragma omp single
  {
    /* Both tasks name i in an out dependence, so the second task is ordered
       after the first: i becomes 1, then 2, with no concurrent writes. */
#pragma omp task depend (out:i)
    i = 1;

#pragma omp task depend (out:i)
    i = 2;
  }
  /* The assert runs after the parallel region, so both tasks have completed
     and the dependence chain guarantees the final value is 2. */
  assert (i==2);
  return 0;
}
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H

#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"

namespace clang {

//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//

/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// Ending location of the directive.
  SourceLocation EndLoc;
  /// Number of clauses.
  const unsigned NumClauses;
  /// Number of child expressions/stmts.
  const unsigned NumChildren;
  /// Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;

  /// Get the clauses storage.
  /// The clause pointers live in trailing storage at ClausesOffset bytes
  /// past the start of this object.
  MutableArrayRef<OMPClause *> getClauses() {
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }

protected:
  /// Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {}

  /// Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    *child_begin() = S;
  }

public:
  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    /// Advance the wrapped iterator to the next clause of the requested type.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();

    if (Clauses.begin() != Clauses.end()) {
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }

  /// Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return *child_begin();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() && "no associated statement.");
    return *child_begin();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(std::any_of(CaptureRegions.begin(), CaptureRegions.end(),
                       [=](const OpenMPDirectiveKind K) {
                         return K == RegionKind;
                       }) &&
           "RegionKind not found in OpenMP CaptureRegions.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    // Unwrap one level of capturing per region until the requested one.
    for (auto ThisCaptureRegion : CaptureRegions) {
      if (ThisCaptureRegion == RegionKind)
        return CS;
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    }
    llvm_unreachable("Incorrect RegionKind specified for directive.");
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() && getAssociatedStmt() &&
           "Must have associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    assert(!CaptureRegions.empty() &&
           "At least one captured statement must be provided.");
    auto *CS = cast<CapturedStmt>(getAssociatedStmt());
    for (unsigned Level = CaptureRegions.size(); Level > 1; --Level)
      CS = cast<CapturedStmt>(CS->getCapturedStmt());
    return CS;
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    /// Do not mark all the special expression/statements as children, except
    /// for the associated statement.
    return child_range(ChildStorage, ChildStorage + 1);
  }

  const_child_range children() const {
    if (!hasAssociatedStmt())
      return const_child_range(const_child_iterator(), const_child_iterator());
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(
        const_cast<OMPExecutableDirective *>(this)->getClauses().end());
    return const_child_range(ChildStorage, ChildStorage + 1);
  }

  ArrayRef<OMPClause *> clauses() { return getClauses(); }

  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const;
  Stmt *getStructuredBlock() {
    return const_cast<Stmt *>(
        const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock());
  }
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// true if the construct has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
/// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. 
/// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 11 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, three arrays of length CollapsedNum are /// allocated: loop counters, their updates and final values. /// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, InitOffset = 6, IncOffset = 7, PreInitsOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals arrays). DefaultEnd = 9, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, NumIterationsOffset = 16, // Offset to the end for worksharing loop directives. 
WorksharingEnd = 17, PrevLowerBoundVariableOffset = 17, PrevUpperBoundVariableOffset = 18, DistIncOffset = 19, PrevEnsureUpperBoundOffset = 20, CombinedLowerBoundVariableOffset = 21, CombinedUpperBoundVariableOffset = 22, CombinedEnsureUpperBoundOffset = 23, CombinedInitOffset = 24, CombinedConditionOffset = 25, CombinedNextLowerBoundOffset = 26, CombinedNextUpperBoundOffset = 27, CombinedDistConditionOffset = 28, CombinedParForInDistConditionOffset = 29, // Offset to the end (and start of the following counters/updates/finals // arrays) for combined distribute loop directives. CombinedDistributeEnd = 30, }; /// Get the counters storage. MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { Expr **Storage = reinterpret_cast<Expr **>(&*std::next( child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. 
/// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. 
static unsigned numLoopChildren(unsigned CollapsedNum,
                                OpenMPDirectiveKind Kind) {
  return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
                                                   // PrivateCounters, Inits,
                                                   // Updates and Finals
}

// Setters for the fixed-offset child expressions. Each stores into the
// children array at the matching *Offset; those guarded by asserts are only
// valid for the directive kinds that allocate the corresponding slots.
void setIterationVariable(Expr *IV) {
  *std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
  *std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
  *std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
  *std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; }
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
void setPreInits(Stmt *PreInits) {
  *std::next(child_begin(), PreInitsOffset) = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setNumIterations(Expr *NI) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  *std::next(child_begin(), NumIterationsOffset) = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB;
}
void setDistInc(Expr *DistInc) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), DistIncOffset) = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedInitOffset) = CombInit;
}
void setCombinedCond(Expr *CombCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedConditionOffset) = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB;
}
void setCombinedDistCond(Expr *CombDistCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond;
}
void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  *std::next(child_begin(), CombinedParForInDistConditionOffset) =
      CombParForInDistCond;
}
// Bulk setters for the variable-length expression arrays that follow the
// fixed offsets (defined out of line).
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);

public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. pragma omp distribute parallel for)
struct DistCombinedHelperExprs {
  /// DistributeLowerBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *LB;
  /// DistributeUpperBound - used when composing 'omp distribute' with
  /// 'omp for' in a same construct.
  Expr *UB;
  /// DistributeEnsureUpperBound - used when composing 'omp distribute'
  /// with 'omp for' in a same construct, EUB depends on DistUB
  Expr *EUB;
  /// Distribute loop iteration variable init used when composing 'omp
  /// distribute'
  ///  with 'omp for' in a same construct
  Expr *Init;
  /// Distribute Loop condition used when composing 'omp distribute'
  ///  with 'omp for' in a same construct
  Expr *Cond;
  /// Update of LowerBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NLB;
  /// Update of UpperBound for statically scheduled omp loops for
  /// outer loop in combined constructs (e.g. 'distribute parallel for')
  Expr *NUB;
  /// Distribute Loop condition used when composing 'omp distribute'
  ///  with 'omp for' in a same construct when schedule is chunked.
  Expr *DistCond;
  /// 'omp parallel for' loop condition used when composed with
  /// 'omp distribute' in the same construct and when schedule is
  /// chunked and the chunk size is 1.
  Expr *ParForInDistCond;
};

/// The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
  /// Loop iteration variable.
  Expr *IterationVarRef;
  /// Loop last iteration number.
  Expr *LastIteration;
  /// Loop number of iterations.
  Expr *NumIterations;
  /// Calculation of last iteration.
  Expr *CalcLastIteration;
  /// Loop pre-condition.
  Expr *PreCond;
  /// Loop condition.
  Expr *Cond;
  /// Loop iteration variable init.
  Expr *Init;
  /// Loop increment.
  Expr *Inc;
  /// IsLastIteration - local flag variable passed to runtime.
  Expr *IL;
  /// LowerBound - local variable passed to runtime.
  Expr *LB;
  /// UpperBound - local variable passed to runtime.
  Expr *UB;
  /// Stride - local variable passed to runtime.
  Expr *ST;
  /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
  Expr *EUB;
  /// Update of LowerBound for statically scheduled 'omp for' loops.
  Expr *NLB;
  /// Update of UpperBound for statically scheduled 'omp for' loops.
  Expr *NUB;
  /// PreviousLowerBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevLB;
  /// PreviousUpperBound - local variable passed to runtime in the
  /// enclosing schedule or null if that does not apply.
  Expr *PrevUB;
  /// DistInc - increment expression for distribute loop when found
  /// combined with a further loop level (e.g. in 'distribute parallel for')
  /// expression IV = IV + ST
  Expr *DistInc;
  /// PrevEUB - expression similar to EUB but to be used when loop
  /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
  /// when ensuring that the UB is either the calculated UB by the runtime or
  /// the end of the assigned distribute chunk)
  /// expression UB = min (UB, PrevUB)
  Expr *PrevEUB;
  /// Counters Loop counters.
  SmallVector<Expr *, 4> Counters;
  /// PrivateCounters Loop counters.
  SmallVector<Expr *, 4> PrivateCounters;
  /// Expressions for loop counters inits for CodeGen.
  SmallVector<Expr *, 4> Inits;
  /// Expressions for loop counters update for CodeGen.
  SmallVector<Expr *, 4> Updates;
  /// Final loop counter values for CodeGen.
  SmallVector<Expr *, 4> Finals;
  /// Init statement for all captured expressions.
  Stmt *PreInits;

  /// Expressions used when combining OpenMP loop pragmas
  DistCombinedHelperExprs DistCombinedFields;

  /// Check if all the expressions are built (does not check the
  /// worksharing ones).
  bool builtAll() {
    return IterationVarRef != nullptr && LastIteration != nullptr &&
           NumIterations != nullptr && PreCond != nullptr &&
           Cond != nullptr && Init != nullptr && Inc != nullptr;
  }

  /// Initialize all the fields to null.
  /// \param Size Number of elements in the counters/finals/updates arrays.
  void clear(unsigned Size) {
    IterationVarRef = nullptr;
    LastIteration = nullptr;
    CalcLastIteration = nullptr;
    PreCond = nullptr;
    Cond = nullptr;
    Init = nullptr;
    Inc = nullptr;
    IL = nullptr;
    LB = nullptr;
    UB = nullptr;
    ST = nullptr;
    EUB = nullptr;
    NLB = nullptr;
    NUB = nullptr;
    NumIterations = nullptr;
    PrevLB = nullptr;
    PrevUB = nullptr;
    DistInc = nullptr;
    PrevEUB = nullptr;
    Counters.resize(Size);
    PrivateCounters.resize(Size);
    Inits.resize(Size);
    Updates.resize(Size);
    Finals.resize(Size);
    for (unsigned i = 0; i < Size; ++i) {
      Counters[i] = nullptr;
      PrivateCounters[i] = nullptr;
      Inits[i] = nullptr;
      Updates[i] = nullptr;
      Finals[i] = nullptr;
    }
    PreInits = nullptr;
    DistCombinedFields.LB = nullptr;
    DistCombinedFields.UB = nullptr;
    DistCombinedFields.EUB = nullptr;
    DistCombinedFields.Init = nullptr;
    DistCombinedFields.Cond = nullptr;
    DistCombinedFields.NLB = nullptr;
    DistCombinedFields.NUB = nullptr;
    DistCombinedFields.DistCond = nullptr;
    DistCombinedFields.ParForInDistCond = nullptr;
  }
};

/// Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }

// Getters for the fixed-offset child expressions. They mirror the setters:
// the children are stored as Stmt*, so each reads the slot and casts it back
// to Expr* (const_cast is used because child_begin() is the only accessor).
Expr *getIterationVariable() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), IterationVariableOffset)));
}
Expr *getLastIteration() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), LastIterationOffset)));
}
Expr *getCalcLastIteration() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CalcLastIterationOffset)));
}
Expr *getPreCond() const {
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PreConditionOffset)));
}
Expr *getCond() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset)));
}
Expr *getInit() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset)));
}
Expr *getInc() const {
  return const_cast<Expr *>(
      reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset)));
}
const Stmt *getPreInits() const {
  return *std::next(child_begin(), PreInitsOffset);
}
Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); }
Expr *getIsLastIterVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), IsLastIterVariableOffset)));
}
Expr *getLowerBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), LowerBoundVariableOffset)));
}
Expr *getUpperBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), UpperBoundVariableOffset)));
}
Expr *getStrideVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), StrideVariableOffset)));
}
Expr *getEnsureUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), EnsureUpperBoundOffset)));
}
Expr *getNextLowerBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NextLowerBoundOffset)));
}
Expr *getNextUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NextUpperBoundOffset)));
}
Expr *getNumIterations() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), NumIterationsOffset)));
}
Expr *getPrevLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PrevLowerBoundVariableOffset)));
}
Expr *getPrevUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PrevUpperBoundVariableOffset)));
}
Expr *getDistInc() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), DistIncOffset)));
}
Expr *getPrevEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), PrevEnsureUpperBoundOffset)));
}
Expr *getCombinedLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedLowerBoundVariableOffset)));
}
Expr *getCombinedUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedUpperBoundVariableOffset)));
}
Expr *getCombinedEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedEnsureUpperBoundOffset)));
}
Expr *getCombinedInit() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedInitOffset)));
}
Expr *getCombinedCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedConditionOffset)));
}
Expr *getCombinedNextLowerBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedNextLowerBoundOffset)));
}
Expr *getCombinedNextUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedNextUpperBoundOffset)));
}
Expr *getCombinedDistCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedDistConditionOffset)));
}
Expr *getCombinedParForInDistCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(
      *std::next(child_begin(), CombinedParForInDistConditionOffset)));
}
const Stmt *getBody() const {
  // This relies on the loop form being already checked by Sema.
  const Stmt *Body =
      getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
  Body = cast<ForStmt>(Body)->getBody();
  // Descend one ForStmt per collapsed loop level.
  for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
    Body = Body->IgnoreContainers();
    Body = cast<ForStmt>(Body)->getBody();
  }
  return Body;
}

ArrayRef<Expr *> counters() { return getCounters(); }

ArrayRef<Expr *> counters() const {
  return const_cast<OMPLoopDirective *>(this)->getCounters();
}

ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }

ArrayRef<Expr *> private_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}

ArrayRef<Expr *> inits() { return getInits(); }

ArrayRef<Expr *> inits() const {
  return const_cast<OMPLoopDirective *>(this)->getInits();
}

ArrayRef<Expr *> updates() { return getUpdates(); }

ArrayRef<Expr *> updates() const {
  return const_cast<OMPLoopDirective *>(this)->getUpdates();
}

ArrayRef<Expr *> finals() { return getFinals(); }

ArrayRef<Expr *> finals() const {
  return const_cast<OMPLoopDirective *>(this)->getFinals();
}

static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPSimdDirectiveClass ||
         T->getStmtClass() == OMPForDirectiveClass ||
         T->getStmtClass() == OMPForSimdDirectiveClass ||
         T->getStmtClass() == OMPParallelForDirectiveClass ||
         T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTaskLoopDirectiveClass ||
         T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
         T->getStmtClass() == OMPDistributeDirectiveClass ||
         T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
         T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
         T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
         T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTargetSimdDirectiveClass ||
         T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
         T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
         T->getStmtClass() ==
             OMPTeamsDistributeParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
         T->getStmtClass() ==
             OMPTargetTeamsDistributeParallelForDirectiveClass ||
         T->getStmtClass() ==
             OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
         T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};

/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C,
                                       unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};

/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// true if current directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc,
                         EndLoc, CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
/// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. 
/// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. 
/// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, StartLoc, EndLoc, 0, 1), HasCancel(false) {} /// Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, SourceLocation(), SourceLocation(), 0, 1), HasCancel(false) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. 
/// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. 
/// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. 
/// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. 
/// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. 
/// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. /// explicit OMPTaskgroupDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, SourceLocation(), SourceLocation(), NumClauses, 2) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return static_cast<const Expr *>(*std::next(child_begin(), 1)); } Expr *getReductionRef() { return static_cast<Expr *>(*std::next(child_begin(), 1)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. 
/// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, StartLoc, EndLoc, NumClauses, 0) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPFlushDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush, SourceLocation(), SourceLocation(), NumClauses, 0) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. 
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};

/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first (postfix) form of the expression and
  /// false otherwise.
  bool IsPostfixUpdate;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  // Children layout: slot 0 is the associated statement (managed by the base
  // class); slots 1-4 hold 'x', the update expression, 'v' and 'expr'.

  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// Get 'expr' part of the associated expression/statement.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};

/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};

/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, StartLoc, EndLoc, NumClauses,
                               1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
                               OMPD_target_data, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};

/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetEnterDataDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
                               OMPD_target_enter_data, SourceLocation(),
                               SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};

/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses The number of clauses.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
                               OMPD_target_exit_data, StartLoc, EndLoc,
                               NumClauses, /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
/// explicit OMPTargetExitDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTeamsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};

/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Which enclosing region kind ('for', 'sections', ...) this cancellation
  // point applies to; OMPD_unknown until set by setCancelRegion().
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc,
                               /*NumClauses=*/0, /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), /*NumClauses=*/0,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};

/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  // Which enclosing region kind this cancel applies to; OMPD_unknown until
  // set by setCancelRegion().
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};

/// This represents '#pragma omp taskloop' directive.
/// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. 
/// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. 
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum,
                                        unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum,
                                                 unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass,
                         OMPD_distribute_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum,
                                      unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass,
                         OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

public:
  /// Creates directive with a list of \a Clauses.
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. 
/// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, SourceLocation(),SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. 
/// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
// ===== File: matching.h (next concatenated header) =====
#ifndef REGISTRATION_MATCHING_H #define REGISTRATION_MATCHING_H #include <unordered_set> #include <utility> #include <opencv2/features2d.hpp> #include <pcl/common/norms.h> #include <pcl/point_cloud.h> #include <pcl/search/kdtree.h> #include <pcl/common/transforms.h> #include "common.h" #define MATCHING_RATIO_THRESHOLD 0.95f #define MATCHING_CLUSTER_THRESHOLD 0.8f #define MATCHING_CLUSTER_RADIUS_COEF 7.f template<typename FeatureT> class FeatureMatcher { public: using Ptr = std::shared_ptr<FeatureMatcher<FeatureT>>; using ConstPtr = std::shared_ptr<const FeatureMatcher<FeatureT>>; using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; virtual pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) = 0; inline float getAverageDistance() const { return average_distance_; } virtual std::string getClassName() = 0; protected: void printDebugInfo(const std::vector<MultivaluedCorrespondence> &mv_correspondences) { float dists_sum = 0.f; int n_dists = 0; for (int i = 0; i < mv_correspondences.size(); i++) { if (mv_correspondences[i].query_idx >= 0) { dists_sum += mv_correspondences[i].distances[0]; n_dists++; } } if (n_dists == 0) { PCL_ERROR("[%s::match] no distances were calculated.\n", getClassName().c_str()); } else { average_distance_ = dists_sum / (float) n_dists; PCL_DEBUG("[%s::match] average distance to nearest neighbour: %0.7f.\n", getClassName().c_str(), average_distance_); } } float average_distance_ = std::numeric_limits<float>::max(); AlignmentParameters parameters_; }; template<typename FeatureT> class LeftToRightFeatureMatcher : public FeatureMatcher<FeatureT> { public: using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; LeftToRightFeatureMatcher() = delete; 
LeftToRightFeatureMatcher(AlignmentParameters parameters) : parameters_(std::move(parameters)) {}; pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) override { int nr_dims = point_representation->getNumberOfDimensions(); std::vector<MultivaluedCorrespondence> mv_correspondences_ij; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_src->getInputCloud(), pcd_tree_tgt, src, tgt, mv_correspondences_ij, point_representation, *parameters_.guess, parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, this->parameters_.randomness, threads); } this->printDebugInfo(mv_correspondences_ij); std::vector<MultivaluedCorrespondence> mv_correspondences_ji; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_tgt->getInputCloud(), pcd_tree_src, tgt, src, mv_correspondences_ji, point_representation, parameters_.guess->inverse(), parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, threads); } pcl::Correspondences correspondences_mutual; for (int i = 0; i < src->size(); ++i) { for (const int &j: mv_correspondences_ij[i].match_indices) { auto &corr_j = mv_correspondences_ji[j]; for (int k = 0; k < 
corr_j.match_indices.size(); ++k) { if (corr_j.match_indices[k] == i) { correspondences_mutual.push_back({i, j, corr_j.distances[k]}); break; } } } } PCL_DEBUG("[%s::match] %i correspondences remain after mutual filtering.\n", getClassName().c_str(), correspondences_mutual.size()); return correspondences_mutual; } inline std::string getClassName() override { return "LeftToRightFeatureMatcher"; } protected: AlignmentParameters parameters_; }; template<typename FeatureT> class RatioFeatureMatcher : public FeatureMatcher<FeatureT> { public: using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; RatioFeatureMatcher() = delete; RatioFeatureMatcher(AlignmentParameters parameters) : parameters_(std::move(parameters)) {}; pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) override { if (this->parameters_.randomness != 1) { PCL_WARN("[%s::match] k_corrs different from 1 cannot be used with ratio filtering, using k_corrs = 1.\n", getClassName().c_str()); } int nr_dims = point_representation->getNumberOfDimensions(); std::vector<MultivaluedCorrespondence> mv_correspondences_ij; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_src->getInputCloud(), pcd_tree_tgt, src, tgt, mv_correspondences_ij, point_representation, *parameters_.guess, parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, 2, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, 2, threads); } this->printDebugInfo(mv_correspondences_ij); float dist1, dist2, ratio; pcl::Correspondences correspondences_ratio; for (auto 
&mv_corr: mv_correspondences_ij) { if (mv_corr.match_indices.size() != 2) { continue; } dist1 = std::min(mv_corr.distances[0], mv_corr.distances[1]); dist2 = std::max(mv_corr.distances[0], mv_corr.distances[1]); ratio = (dist2 == 0.f) ? 1.f : (dist1 / dist2); if (ratio < MATCHING_RATIO_THRESHOLD) { int i = (dist1 < dist2) ? 0 : 1; correspondences_ratio.push_back({mv_corr.query_idx, mv_corr.match_indices[i], ratio}); } } PCL_DEBUG("[%s::match] %i correspondences remain after ratio filtering.\n", getClassName().c_str(), correspondences_ratio.size()); return correspondences_ratio; } inline std::string getClassName() override { return "RatioFeatureMatcher"; } protected: AlignmentParameters parameters_; }; template<typename FeatureT> class ClusterFeatureMatcher : public FeatureMatcher<FeatureT> { public: using KdTreeConstPtr = typename pcl::search::KdTree<PointN>::ConstPtr; ClusterFeatureMatcher() = delete; ClusterFeatureMatcher(AlignmentParameters parameters) : parameters_(std::move(parameters)) {}; pcl::Correspondences match(const typename pcl::PointCloud<FeatureT>::ConstPtr &src, const typename pcl::PointCloud<FeatureT>::ConstPtr &tgt, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt, const typename pcl::PointRepresentation<FeatureT>::Ptr &point_representation, int threads) override { int nr_dims = point_representation->getNumberOfDimensions(); std::vector<MultivaluedCorrespondence> mv_correspondences_ij; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_src->getInputCloud(), pcd_tree_tgt, src, tgt, mv_correspondences_ij, point_representation, *parameters_.guess, parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(src, tgt, mv_correspondences_ij, point_representation, 
this->parameters_.randomness, threads); } this->printDebugInfo(mv_correspondences_ij); std::vector<MultivaluedCorrespondence> mv_correspondences_ji; if (this->parameters_.guess != nullptr) { matchLocal<FeatureT>(pcd_tree_tgt->getInputCloud(), pcd_tree_src, tgt, src, mv_correspondences_ji, point_representation, parameters_.guess->inverse(), parameters_.match_search_radius, parameters_.randomness, threads); } else if (this->parameters_.use_bfmatcher) { matchBF<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, nr_dims, this->parameters_.bf_block_size); } else { matchFLANN<FeatureT>(tgt, src, mv_correspondences_ji, point_representation, this->parameters_.randomness, threads); } float matching_cluster_radius = MATCHING_CLUSTER_RADIUS_COEF * this->parameters_.voxel_size; pcl::Correspondences correspondences_cluster; for (int i = 0; i < src->size(); ++i) { for (int j: mv_correspondences_ij[i].match_indices) { float distance_i = calculateCorrespondenceDistance(i, j, matching_cluster_radius, mv_correspondences_ij, pcd_tree_src, pcd_tree_tgt); float distance_j = calculateCorrespondenceDistance(j, i, matching_cluster_radius, mv_correspondences_ji, pcd_tree_tgt, pcd_tree_src); if (distance_i < MATCHING_CLUSTER_THRESHOLD && distance_j < MATCHING_CLUSTER_THRESHOLD) { correspondences_cluster.push_back({i, j, distance_i}); } } } PCL_DEBUG("[%s::match] %i correspondences remain after cluster filtering.\n", getClassName().c_str(), correspondences_cluster.size()); return correspondences_cluster; } inline std::string getClassName() override { return "ClusterFeatureMatcher"; } protected: float calculateCorrespondenceDistance(int i, int j, float radius, const std::vector<MultivaluedCorrespondence> &mv_correspondences_ij, const KdTreeConstPtr &pcd_tree_src, const KdTreeConstPtr &pcd_tree_tgt) { std::unordered_set<int> i_neighbors, j_neighbors; pcl::Indices match_indices; std::vector<float> distances; pcd_tree_src->radiusSearch(i, radius, 
match_indices, distances); std::copy(match_indices.begin(), match_indices.end(), std::inserter(i_neighbors, i_neighbors.begin())); pcd_tree_tgt->radiusSearch(j, radius, match_indices, distances); std::copy(match_indices.begin(), match_indices.end(), std::inserter(j_neighbors, j_neighbors.begin())); int count_consistent_pairs = 0, count_pairs = 0; for (int i_neighbor: i_neighbors) { for (int i_neighbor_match: mv_correspondences_ij[i_neighbor].match_indices) { if (j_neighbors.contains(i_neighbor_match)) { count_consistent_pairs++; } count_pairs++; } } if (count_pairs == 0) { return 0; } return 1.f - (float) count_consistent_pairs / (float) count_pairs; } AlignmentParameters parameters_; }; template<typename FeatureT> typename FeatureMatcher<FeatureT>::Ptr getFeatureMatcher(const AlignmentParameters &parameters) { if (parameters.matching_id == MATCHING_RATIO) { return std::make_shared<RatioFeatureMatcher<FeatureT>>(parameters); } else if (parameters.matching_id == MATCHING_CLUSTER) { return std::make_shared<ClusterFeatureMatcher<FeatureT>>(parameters); } else if (parameters.matching_id != MATCHING_LEFT_TO_RIGHT) { PCL_WARN("[getFeatureMatcher] feature matcher %s isn't supported, left-to-right matcher will be used.", parameters.matching_id.c_str()); } return std::make_shared<LeftToRightFeatureMatcher<FeatureT>>(parameters); } template<typename T> class KNNResult { private: int capacity_; int count_; std::vector<int> indices_; std::vector<T> dists_; public: inline KNNResult(int capacity) : capacity_(capacity), count_(0) { indices_.reserve(capacity); dists_.reserve(capacity); } inline int size() const { return count_; } inline std::vector<int> getIndices() const { return indices_; } inline std::vector<T> getDistances() const { return dists_; } inline bool addPoint(T dist, int index) { if (count_ < capacity_) { indices_.resize(count_ + 1); dists_.resize(count_ + 1); } int i; for (i = count_; i > 0; --i) { if (dists_[i - 1] > dist) { if (i < capacity_) { dists_[i] = 
dists_[i - 1]; indices_[i] = indices_[i - 1]; } } else { break; } } if (i < capacity_) { dists_[i] = dist; indices_[i] = index; } if (count_ < capacity_) { count_++; } return true; } }; template<typename FeatureT> void pcl2cv(int nr_dims, const typename pcl::PointCloud<FeatureT>::ConstPtr &src, cv::OutputArray &dst, int size = 0, int offset = 0) { if (src->empty()) return; int rows = size == 0 ? (src->size() - offset) : std::min((int) (src->size() - offset), size); cv::Mat _src(rows, nr_dims, CV_32FC1, (void *) &src->points[offset], sizeof(src->points[0])); _src.copyTo(dst); } template<typename FeatureT> void matchFLANN(const typename pcl::PointCloud<FeatureT>::ConstPtr &query_features, const typename pcl::PointCloud<FeatureT>::ConstPtr &train_features, std::vector<MultivaluedCorrespondence> &mv_correspondences, const typename pcl::PointRepresentation<FeatureT>::Ptr point_representation, int k_matches, int threads) { pcl::KdTreeFLANN<FeatureT> feature_tree(new pcl::KdTreeFLANN<FeatureT>); feature_tree.setInputCloud(train_features); auto n = query_features->size(); mv_correspondences.resize(n, MultivaluedCorrespondence{}); #pragma omp parallel for num_threads(threads) default(none) shared(mv_correspondences, query_features, point_representation, feature_tree) firstprivate(n, k_matches) for (int i = 0; i < n; i++) { if (point_representation->isValid(query_features->points[i])) { mv_correspondences[i].query_idx = i; pcl::Indices &match_indices = mv_correspondences[i].match_indices; std::vector<float> &match_distances = mv_correspondences[i].distances; match_indices.resize(k_matches); match_distances.resize(k_matches); feature_tree.nearestKSearch(*query_features, i, k_matches, match_indices, match_distances); for (int j = 0; j < k_matches; ++j) { match_distances[j] = std::sqrt(match_distances[j]); } } } } template<typename FeatureT> void matchBF(const typename pcl::PointCloud<FeatureT>::ConstPtr &query_features, const typename pcl::PointCloud<FeatureT>::ConstPtr 
&train_features, std::vector<MultivaluedCorrespondence> &mv_correspondences, const typename pcl::PointRepresentation<FeatureT>::Ptr point_representation, int k_matches, int nr_dims, int block_size) { auto matcher = cv::BFMatcher::create(cv::NORM_L2); std::vector<std::vector<cv::DMatch>> matches; mv_correspondences.resize(query_features->size(), MultivaluedCorrespondence{}); int n_query_blocks = (query_features->size() + block_size - 1) / block_size; for (int i = 0; i < n_query_blocks; ++i) { for (int j = 0; j < (train_features->size() + block_size - 1) / block_size; ++j) { cv::UMat query_features_batch, train_features_batch; pcl2cv<FeatureT>(nr_dims, query_features, query_features_batch, block_size, i * block_size); pcl2cv<FeatureT>(nr_dims, train_features, train_features_batch, block_size, j * block_size); matcher->knnMatch(query_features_batch, train_features_batch, matches, k_matches); for (int l = 0; l < matches.size(); ++l) { if (matches[l].empty() || matches[l][0].queryIdx == -1) { continue; } int query_idx_local = matches[l][0].queryIdx; int query_idx = i * block_size + query_idx_local; for (int m = 0; m < matches[l].size(); ++m) { if (matches[l][m].queryIdx != query_idx_local) { PCL_ERROR("[matchBF] unexpected query index in brute-force matches!"); exit(1); } updateMultivaluedCorrespondence(mv_correspondences[query_idx], query_idx, k_matches, j * block_size + matches[l][m].trainIdx, matches[l][m].distance); } } matches.clear(); } PCL_DEBUG("[matchBF] %d / % d blocks processed.\n", i + 1, n_query_blocks); } for (int i = 0; i < query_features->size(); i++) { if (!point_representation->isValid(query_features->points[i])) { mv_correspondences[i] = MultivaluedCorrespondence{}; } } } template<typename FeatureT> void matchLocal(const PointNCloud::ConstPtr &query_pcd, const typename pcl::search::KdTree<PointN>::ConstPtr &train_tree, const typename pcl::PointCloud<FeatureT>::ConstPtr &query_features, const typename pcl::PointCloud<FeatureT>::ConstPtr 
&train_features, std::vector<MultivaluedCorrespondence> &mv_correspondences, const typename pcl::PointRepresentation<FeatureT>::Ptr point_representation, const Eigen::Matrix4f &guess, float match_search_radius, int k_matches, int threads) { PointNCloud transformed_query_pcd; pcl::transformPointCloudWithNormals(*query_pcd, transformed_query_pcd, guess); auto n = transformed_query_pcd.size(); mv_correspondences.resize(query_features->size(), MultivaluedCorrespondence{}); #pragma omp parallel num_threads(threads) default(none) \ shared(transformed_query_pcd, train_tree, query_features, train_features, mv_correspondences, point_representation) \ firstprivate(n, k_matches, match_search_radius) { std::vector<float> distances; pcl::Indices indices; int nr_dims = point_representation->getNumberOfDimensions(); #pragma omp for for (int query_idx = 0; query_idx < n; ++query_idx) { if (point_representation->isValid(query_features->points[query_idx])) { KNNResult<float> knnResult(k_matches); train_tree->radiusSearch(transformed_query_pcd.points[query_idx], match_search_radius, indices, distances); for (int train_idx: indices) { if (point_representation->isValid(train_features->points[train_idx])) { float dist = pcl::L2_Norm((float *) &query_features->points[query_idx], (float *) &train_features->points[train_idx], nr_dims); knnResult.addPoint(dist, train_idx); } } if (knnResult.size() > 0) { mv_correspondences[query_idx].query_idx = query_idx; mv_correspondences[query_idx].match_indices = knnResult.getIndices(); mv_correspondences[query_idx].distances = knnResult.getDistances(); } } } } } #endif
GB_AxB_saxpy3_symbolic.c
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_symbolic: symbolic analysis for GB_AxB_saxpy3
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Symbolic analysis for C=A*B, C<M>=A*B or C<!M>=A*B, via GB_AxB_saxpy3.
// Coarse tasks compute nnz (C (:,j)) for each of their vectors j. Fine tasks
// just scatter the mask M into the hash table. This phase does not depend on
// the semiring, nor does it depend on the type of C, A, or B. It does access
// the values of M, if the mask matrix M is present and not structural.

// If B is hypersparse, C must also be hypersparse.
// Otherwise, C must be sparse.

#include "GB_AxB_saxpy3.h"
#include "GB_AxB_saxpy3_template.h"
#include "GB_atomics.h"
#include "GB_bracket.h"
#include "GB_unused.h"

void GB_AxB_saxpy3_symbolic
(
    GrB_Matrix C,               // Cp is computed for coarse tasks
    const GrB_Matrix M,         // mask matrix M
    const bool Mask_comp,       // M complemented, or not
    const bool Mask_struct,     // M structural, or not
    const bool M_dense_in_place,
    const GrB_Matrix A,         // A matrix; only the pattern is accessed
    const GrB_Matrix B,         // B matrix; only the pattern is accessed
    GB_saxpy3task_struct *TaskList,     // list of tasks, and workspace
    int ntasks,                 // total number of tasks
    int nfine,                  // number of fine tasks
    int nthreads                // number of threads
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    // inputs may be jumbled (unsorted within a vector), but must have no
    // zombies or pending tuples
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (GB_JUMBLED_OK (A)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT (GB_JUMBLED_OK (B)) ;
    ASSERT (!GB_PENDING (B)) ;

    //--------------------------------------------------------------------------
    // get M, A, B, and C
    //--------------------------------------------------------------------------

    int64_t *GB_RESTRICT Cp = C->p ;
    const int64_t cvlen = C->vlen ;

    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    const int8_t *GB_RESTRICT Bb = B->b ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    const int64_t bvlen = B->vlen ;
    const bool B_jumbled = B->jumbled ;
    const bool B_is_bitmap = GB_IS_BITMAP (B) ;
    const bool B_is_sparse = GB_IS_SPARSE (B) ;
    const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
    const bool B_is_sparse_or_hyper = B_is_sparse || B_is_hyper ;

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    const int8_t *GB_RESTRICT Ab = A->b ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    const int64_t anvec = A->nvec ;
    const int64_t avlen = A->vlen ;
    const bool A_is_bitmap = GB_IS_BITMAP (A) ;
    const bool A_is_sparse = GB_IS_SPARSE (A) ;
    const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
    const bool A_jumbled = A->jumbled ;

    // mask components default to empty; filled in below only if M is present
    const int64_t *GB_RESTRICT Mp = NULL ;
    const int64_t *GB_RESTRICT Mh = NULL ;
    const int8_t *GB_RESTRICT Mb = NULL ;
    const int64_t *GB_RESTRICT Mi = NULL ;
    const GB_void *GB_RESTRICT Mx = NULL ;
    size_t msize = 0 ;
    int64_t mnvec = 0 ;
    int64_t mvlen = 0 ;
    const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
    const bool M_is_bitmap = GB_IS_BITMAP (M) ;
    const bool M_jumbled = GB_JUMBLED (M) ;
    if (M != NULL)
    {
        Mp = M->p ;
        Mh = M->h ;
        Mb = M->b ;
        Mi = M->i ;
        // Mx is NULL for a structural mask: only the pattern of M is used
        Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
        msize = M->type->size ;
        mnvec = M->nvec ;
        mvlen = M->vlen ;
    }

    // 3 cases:
    //      M not present and Mask_comp false: compute C=A*B
    //      M present and Mask_comp false: compute C<M>=A*B
    //      M present and Mask_comp true : compute C<!M>=A*B
    // If M is NULL on input, then Mask_comp is also false on input.

    const bool mask_is_M = (M != NULL && !Mask_comp) ;

    // ignore the mask if present, not complemented, dense and
    // used in place, structural, and not bitmap. In this case,
    // all entries in M are true, so M can be ignored.
    const bool ignore_mask = mask_is_M && M_dense_in_place && Mask_struct && !M_is_bitmap ;

    //==========================================================================
    // phase1: count nnz(C(:,j)) for coarse tasks, scatter M for fine tasks
    //==========================================================================

    // At this point, all of Hf [...] is zero, for all tasks.
    // Hi and Hx are not initialized.

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t hash_size = TaskList [taskid].hsize ;
        // a workspace spanning the entire vector length means this task uses
        // Gustavson's method; otherwise it uses the hash method
        bool use_Gustavson = (hash_size == cvlen) ;

        if (taskid < nfine)
        {

            //------------------------------------------------------------------
            // no work for fine tasks in phase1 if M is not present
            //------------------------------------------------------------------

            if (M == NULL) continue ;

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            int64_t kk = TaskList [taskid].vector ;
            // Bp == NULL means B is bitmap/full, so B(:,j) has bvlen entries
            int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ;
            // no work to do if B(:,j) is empty
            if (bjnz == 0) continue ;

            // partition M(:,j)
            GB_GET_M_j ;        // get M(:,j)
            int team_size = TaskList [taskid].team_size ;
            int leader = TaskList [taskid].leader ;
            int my_teamid = taskid - leader ;
            int64_t mystart, myend ;
            // each member of the team scatters its own slice of M(:,j)
            GB_PARTITION (mystart, myend, mjnz, my_teamid, team_size) ;
            mystart += pM_start ;
            myend += pM_start ;

            if (use_Gustavson)
            {

                //--------------------------------------------------------------
                // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B
                //--------------------------------------------------------------

                // Scatter the values of M(:,j) into Hf. No atomics needed
                // since all indices i in M(;,j) are unique. Do not scatter
                // the mask if M(:,j) is a dense vector, since in that case
                // the numeric phase accesses M(:,j) directly, not via Hf.

                if (mjnz > 0)
                {
                    int8_t *GB_RESTRICT Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ;
                    GB_SCATTER_M_j (mystart, myend, 1) ;
                }

            }
            else if (!M_dense_in_place)
            {

                //--------------------------------------------------------------
                // phase1: fine hash task, C<M>=A*B or C<!M>=A*B
                //--------------------------------------------------------------

                // If M_dense_in_place is true, this is skipped. The mask M
                // is dense, and is used in-place.

                // The least significant 2 bits of Hf [hash] is the flag f, and
                // the upper bits contain h, as (h,f). After this phase1, if
                // M(i,j)=1 then the hash table contains ((i+1),1) in Hf [hash]
                // at some location.

                // Later, the flag values of f = 2 and 3 are also used.
                // Only f=1 is set in this phase.

                // h == 0, f == 0: unoccupied and unlocked
                // h == i+1, f == 1: occupied with M(i,j)=1

                int64_t *GB_RESTRICT Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
                int64_t hash_bits = (hash_size-1) ;
                // scan my M(:,j)
                for (int64_t pM = mystart ; pM < myend ; pM++)
                {
                    GB_GET_M_ij (pM) ;      // get M(i,j)
                    if (!mij) continue ;    // skip if M(i,j)=0
                    int64_t i = GBI (Mi, pM, mvlen) ;
                    int64_t i_mine = ((i+1) << 2) + 1 ;  // ((i+1),1)
                    // cuckoo-style insertion: displaced entries are carried
                    // forward until an empty slot is found
                    for (GB_HASH (i))
                    {
                        int64_t hf ;
                        // swap my hash entry into the hash table;
                        // does the following using an atomic capture:
                        // { hf = Hf [hash] ; Hf [hash] = i_mine ; }
                        GB_ATOMIC_CAPTURE_INT64 (hf, Hf [hash], i_mine) ;
                        if (hf == 0) break ;        // success
                        // i_mine has been inserted, but a prior entry was
                        // already there. It needs to be replaced, so take
                        // ownership of this displaced entry, and keep
                        // looking until a new empty slot is found for it.
                        i_mine = hf ;
                    }
                }
            }

        }
        else
        {

            //------------------------------------------------------------------
            // coarse tasks: compute nnz in each vector of A*B(:,kfirst:klast)
            //------------------------------------------------------------------

            int64_t *GB_RESTRICT Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
            int64_t kfirst = TaskList [taskid].start ;
            int64_t klast = TaskList [taskid].end ;
            int64_t mark = 0 ;

            if (use_Gustavson)
            {

                //--------------------------------------------------------------
                // phase1: coarse Gustavson task
                //--------------------------------------------------------------

                if (M == NULL)
                {

                    //----------------------------------------------------------
                    // phase1: coarse Gustavson task, C=A*B
                    //----------------------------------------------------------

                    #define GB_SAXPY_COARSE_GUSTAVSON_NOMASK_PHASE1
                    #include "GB_meta16_factory.c"
                    #undef GB_SAXPY_COARSE_GUSTAVSON_NOMASK_PHASE1

                }
                else if (mask_is_M)
                {

                    //----------------------------------------------------------
                    // phase1: coarse Gustavson task, C<M>=A*B
                    //----------------------------------------------------------

                    #define GB_SAXPY_COARSE_GUSTAVSON_M_PHASE1
                    #include "GB_meta16_factory.c"
                    #undef GB_SAXPY_COARSE_GUSTAVSON_M_PHASE1

                }
                else
                {

                    //----------------------------------------------------------
                    // phase1: coarse Gustavson task, C<!M>=A*B
                    //----------------------------------------------------------

                    #define GB_SAXPY_COARSE_GUSTAVSON_NOTM_PHASE1
                    #include "GB_meta16_factory.c"
                    #undef GB_SAXPY_COARSE_GUSTAVSON_NOTM_PHASE1
                }

            }
            else
            {

                //--------------------------------------------------------------
                // phase1: coarse hash task
                //--------------------------------------------------------------

                int64_t *GB_RESTRICT Hi = TaskList [taskid].Hi ;
                int64_t hash_bits = (hash_size-1) ;

                if (M == NULL || ignore_mask)
                {

                    //----------------------------------------------------------
                    // phase1: coarse hash task, C=A*B
                    //----------------------------------------------------------

                    // no mask present, or mask ignored
                    #undef GB_CHECK_MASK_ij
                    #define GB_SAXPY_COARSE_HASH_PHASE1
                    #include "GB_meta16_factory.c"

                }
                else if (mask_is_M)
                {

                    //----------------------------------------------------------
                    // phase1: coarse hash task, C<M>=A*B
                    //----------------------------------------------------------

                    if (M_dense_in_place)
                    {

                        //------------------------------------------------------
                        // M(:,j) is dense. M is not scattered into Hf.
                        //------------------------------------------------------

                        ASSERT (!Mask_struct || M_is_bitmap) ;

                        // GB_CHECK_MASK_ij tests M(i,j) directly from Mjb/Mjx;
                        // the switch below specializes it by mask type size
                        #define GB_CHECK_MASK_ij \
                            bool mij = \
                                (M_is_bitmap ? Mjb [i] : 1) && \
                                (Mask_struct ? 1 : (Mjx [i] != 0)) ; \
                            if (!mij) continue ;

                        switch (msize)
                        {
                            default:
                            case 1 :
                                #undef M_TYPE
                                #define M_TYPE uint8_t
                                #undef M_SIZE
                                #define M_SIZE 1
                                #include "GB_meta16_factory.c"
                                break ;
                            case 2 :
                                #undef M_TYPE
                                #define M_TYPE uint16_t
                                #include "GB_meta16_factory.c"
                                break ;
                            case 4 :
                                #undef M_TYPE
                                #define M_TYPE uint32_t
                                #include "GB_meta16_factory.c"
                                break ;
                            case 8 :
                                #undef M_TYPE
                                #define M_TYPE uint64_t
                                #include "GB_meta16_factory.c"
                                break ;
                            case 16 :
                                // 16-byte mask values: nonzero if either
                                // 8-byte half is nonzero
                                #undef M_TYPE
                                #define M_TYPE uint64_t
                                #undef M_SIZE
                                #define M_SIZE 2
                                #undef GB_CHECK_MASK_ij
                                #define GB_CHECK_MASK_ij \
                                    bool mij = \
                                        (M_is_bitmap ? Mjb [i] : 1) && \
                                        (Mask_struct ? 1 : \
                                        (Mjx [2*i] != 0) || \
                                        (Mjx [2*i+1] != 0)) ; \
                                    if (!mij) continue ;
                                #include "GB_meta16_factory.c"
                                break ;
                        }

                        #undef GB_SAXPY_COARSE_HASH_PHASE1

                    }
                    else
                    {

                        //------------------------------------------------------
                        // M is sparse and scattered into Hf
                        //------------------------------------------------------

                        #define GB_SAXPY_COARSE_HASH_M_PHASE1
                        #include "GB_meta16_factory.c"
                        #undef GB_SAXPY_COARSE_HASH_M_PHASE1
                    }

                }
                else
                {

                    //----------------------------------------------------------
                    // phase1: coarse hash task, C<!M>=A*B
                    //----------------------------------------------------------

                    if (M_dense_in_place)
                    {

                        //------------------------------------------------------
                        // M(:,j) is dense. M is not scattered into Hf.
                        //------------------------------------------------------

                        if (Mask_struct && !M_is_bitmap)
                        {
                            // structural mask, complemented, not bitmap.
                            // No work to do; C is empty.
                            for (int64_t kk = kfirst ; kk <= klast ; kk++)
                            {
                                Cp [kk] = 0 ;
                            }
                            continue ;
                        }

                        #define GB_SAXPY_COARSE_HASH_PHASE1

                        // complemented mask: skip the entry if M(i,j) is TRUE
                        // (note `if (mij) continue`, versus `if (!mij)` above)
                        #undef GB_CHECK_MASK_ij
                        #define GB_CHECK_MASK_ij \
                            bool mij = \
                                (M_is_bitmap ? Mjb [i] : 1) && \
                                (Mask_struct ? 1 : (Mjx [i] != 0)) ; \
                            if (mij) continue ;

                        switch (msize)
                        {
                            default:
                            case 1 :
                                #undef M_TYPE
                                #define M_TYPE uint8_t
                                #undef M_SIZE
                                #define M_SIZE 1
                                #include "GB_meta16_factory.c"
                                break ;
                            case 2 :
                                #undef M_TYPE
                                #define M_TYPE uint16_t
                                #include "GB_meta16_factory.c"
                                break ;
                            case 4 :
                                #undef M_TYPE
                                #define M_TYPE uint32_t
                                #include "GB_meta16_factory.c"
                                break ;
                            case 8 :
                                #undef M_TYPE
                                #define M_TYPE uint64_t
                                #include "GB_meta16_factory.c"
                                break ;
                            case 16 :
                                // 16-byte mask values: nonzero if either
                                // 8-byte half is nonzero
                                #undef M_TYPE
                                #define M_TYPE uint64_t
                                #undef M_SIZE
                                #define M_SIZE 2
                                #undef GB_CHECK_MASK_ij
                                #define GB_CHECK_MASK_ij \
                                    bool mij = \
                                        (M_is_bitmap ? Mjb [i] : 1) && \
                                        (Mask_struct ? 1 : \
                                        (Mjx [2*i] != 0) || \
                                        (Mjx [2*i+1] != 0)) ; \
                                    if (mij) continue ;
                                #include "GB_meta16_factory.c"
                                break ;
                        }

                        #undef GB_SAXPY_COARSE_HASH_PHASE1

                    }
                    else
                    {

                        //------------------------------------------------------
                        // M is sparse and scattered into Hf
                        //------------------------------------------------------

                        #define GB_SAXPY_COARSE_HASH_NOTM_PHASE1
                        #include "GB_meta16_factory.c"
                        #undef GB_SAXPY_COARSE_HASH_NOTM_PHASE1
                    }
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result for phase1 for fine tasks
    //--------------------------------------------------------------------------

    // debug-only verification that the mask was scattered correctly; only the
    // team leader of each fine task checks its whole vector
    #ifdef GB_DEBUG
    if (M != NULL)
    {
        for (taskid = 0 ; taskid < nfine ; taskid++)
        {
            int64_t kk = TaskList [taskid].vector ;
            ASSERT (kk >= 0 && kk < B->nvec) ;
            int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]) ;
            // no work to do if B(:,j) is empty
            if (bjnz == 0) continue ;
            int64_t hash_size = TaskList [taskid].hsize ;
            bool use_Gustavson = (hash_size == cvlen) ;
            int leader = TaskList [taskid].leader ;
            if (leader != taskid) continue ;
            GB_GET_M_j ;        // get M(:,j)
            if (mjnz == 0) continue ;
            int64_t mjcount2 = 0 ;
            int64_t mjcount = 0 ;
            for (int64_t pM = pM_start ; pM < pM_end ; pM++)
            {
                GB_GET_M_ij (pM) ;      // get M(i,j)
                if (mij) mjcount++ ;
            }
            if (use_Gustavson)
            {
                // phase1: fine Gustavson task, C<M>=A*B or C<!M>=A*B
                int8_t *GB_RESTRICT Hf = (int8_t *GB_RESTRICT) TaskList [taskid].Hf ;
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    GB_GET_M_ij (pM) ;      // get M(i,j)
                    int64_t i = GBI (Mi, pM, mvlen) ;
                    ASSERT (Hf [i] == mij) ;
                }
                for (int64_t i = 0 ; i < cvlen ; i++)
                {
                    ASSERT (Hf [i] == 0 || Hf [i] == 1) ;
                    if (Hf [i] == 1) mjcount2++ ;
                }
                ASSERT (mjcount == mjcount2) ;
            }
            else if (!M_dense_in_place)
            {
                // phase1: fine hash task, C<M>=A*B or C<!M>=A*B
                // h == 0, f == 0: unoccupied and unlocked
                // h == i+1, f == 1: occupied with M(i,j)=1
                int64_t *GB_RESTRICT Hf = (int64_t *GB_RESTRICT) TaskList [taskid].Hf ;
                int64_t hash_bits = (hash_size-1) ;
                // each M(i,j)=1 entry must be findable in the hash table
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    GB_GET_M_ij (pM) ;      // get M(i,j)
                    if (!mij) continue ;    // skip if M(i,j)=0
                    int64_t i = GBI (Mi, pM, mvlen) ;
                    int64_t i_mine = ((i+1) << 2) + 1 ;     // ((i+1),1)
                    int64_t probe = 0 ;
                    for (GB_HASH (i))
                    {
                        int64_t hf = Hf [hash] ;
                        if (hf == i_mine)
                        {
                            mjcount2++ ;
                            break ;
                        }
                        ASSERT (hf != 0) ;
                        probe++ ;
                        ASSERT (probe < cvlen) ;
                    }
                }
                ASSERT (mjcount == mjcount2) ;
                mjcount2 = 0 ;
                // every occupied slot must be well-formed, and the occupied
                // count must equal the number of M(i,j)=1 entries
                for (int64_t hash = 0 ; hash < hash_size ; hash++)
                {
                    int64_t hf = Hf [hash] ;
                    int64_t h = (hf >> 2) ;     // empty (0), or a 1-based
                    int64_t f = (hf & 3) ;      // 0 if empty or 1 if occupied
                    if (f == 1) ASSERT (h >= 1 && h <= cvlen) ;
                    ASSERT (hf == 0 || f == 1) ;
                    if (f == 1) mjcount2++ ;
                }
                ASSERT (mjcount == mjcount2) ;
            }
        }
    }
    #endif
}
VGG16_predict.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <string.h> #include <ctype.h> #include <xtime_l.h> #include "wc1.h" #include "wc2.h" #include "wc3.h" #include "wc4.h" #include "wc5.h" #include "wc6.h" #include "wc7.h" #include "wc8.h" #include "wc9.h" #include "wc10.h" #include "wc11.h" #include "weight_im.h" #include "weigh_bc.h" #define _CRT_SECURE_NO_WARNINGS 1 int counter=0; int counter1=0; #define SIZE 224 #define CONV_SIZE 3 volatile int * soft_control = (volatile int *)(0x43C00000); volatile float * soft_input = (volatile float *)(0x43C11000); volatile int * soft_output= (volatile int *)(0x43C21000); volatile float * soft_output1= (volatile float *)(0x43C31000); volatile float * soft_parameter = (volatile float *)(0x43C41000); volatile int * control_1 = (volatile int *)(0x43C50000); volatile float * bias_dense = (volatile float *)(0x43C64000); volatile float * memblock1_in= (volatile float *)(0x43C74000); volatile float * memblock1_out= (volatile float *)(0x43C84000); // Weights and image block START volatile float image[3][224][224]; volatile float wc1[64][3][3][3]; volatile float wc2[64][64][3][3]; volatile float wc3[128][64][3][3]; volatile float wc4[128][128][3][3]; volatile float wc5[256][128][3][3]; volatile float wc6[256][256][3][3]; volatile float wc7[256][256][3][3]; volatile float wc8[512][256][3][3]; volatile float wc9[512][512][3][3]; volatile float wc10[512][512][3][3]; volatile float wc11[512][512][3][3]; volatile float wc12[512][512][3][3]; volatile float wc13[512][512][3][3]; volatile float wd2[4096][4096]; volatile float wd3[4096][1000]; // Blocks for intermediate convolutions volatile int mem_block_shape[3] = {512, SIZE, SIZE}; float mem_block1[512][SIZE][SIZE]; float mem_block2[512][SIZE][SIZE]; // Blocks for dense flatten layers int mem_block_dense_shape = { 512 * 7 * 7 }; float mem_block1_dense[512*7*7]; float mem_block2_dense[512*7*7]; XTime tStart,tEnd; XTime tStart1,tEnd1; XTime tStart2,tEnd2; // 
Weights and image block END /* void gettimeofday(time_t *tp, char *_) { *tp = clock(); return; } double get_seconds(time_t timeStart, time_t timeEnd) { return (double)(timeEnd - timeStart) / CLOCKS_PER_SEC; } */ void reset_mem_block1() { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem_block1[i][j][k] = 0.0; } } } } void reset_mem_block2() { int i, j, k; for (i = 0; i < mem_block_shape[0]; i++) { for (j = 0; j < mem_block_shape[1]; j++) { for (k = 0; k < mem_block_shape[2]; k++) { mem_block2[i][j][k] = 0.0; } } } } void reset_mem_block_dense1() { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem_block1_dense[i] = 0.0; } } void reset_mem_block_dense2() { int i; for (i = 0; i < mem_block_dense_shape; i++) { mem_block2_dense[i] = 0.0; } } void read_weights(int lvls) { int i, j, k, l; counter = 0; //printf("Read1\n"); for (i = 0; i < 64; i++) { for (j = 0; j < 3; j++) { for (k = 0; k <3; k++) { for (l = 0; l < 3; l++) { wc1[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc1_h[counter]; counter++; } } } } counter = 0; //printf("Read2\n"); for (i = 0; i < 64; i++) { for (j = 0; j < 64; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc2[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc2_h[counter];; counter++; } } } } counter = 0; //printf("Read3\n"); for (i = 0; i < 128; i++) { for (j = 0; j < 64; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc3[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc3_h[counter];; counter++; } } } } counter = 0; //printf("Read4\n"); for (i = 0; i < 128; i++) { for (j = 0; j < 128; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc4[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc4_h[counter]; counter++; } } } } counter = 0; //printf("Read5\n"); for (i = 0; i < 256; i++) { for (j = 0; j < 128; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc5[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = 
wc5_h[counter]; counter++; } } } } counter = 0; //printf("Read6\n"); for (i = 0; i < 256; i++) { for (j = 0; j < 256; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc6[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc6_h[counter]; counter++; } } } } counter = 0; //printf("Read7\n"); for (i = 0; i < 256; i++) { for (j = 0; j < 256; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc7[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc7_h[counter]; counter++; } } } } counter = 0; //printf("Read8\n"); for (i = 0; i < 512; i++) { for (j = 0; j < 256; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { wc8[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc8_h[counter]; counter++; } } } } counter = 0; //printf("Read9\n"); for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { //if (i%2==0){ wc9[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc9_h[counter]; counter++; //} //else{ // wc10[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] =-0.003*(i%4); //} } } } } counter = 0; //printf("Read10\n"); for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { ///if (i%2==0){ wc10[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc10_h[counter]; counter++; //} //else{ // wc10[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] =0.003*(i%4); //} } } } } counter = 0; //printf("Read11\n"); for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { //if (i%2==0){ wc11[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] =wc11_h[counter]; counter++; //} // else{ // wc11[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] =-0.003*(i%4); // } } } } } counter = 0; //printf("Read12\n"); for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { //if (i%2==0){ wc10[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] =wc10_h[counter]; counter++; // } //else{ // wc12[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] 
=0.003*(i%4); // } } } } } counter = 0; //printf("Read13\n"); for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { for (k = 0; k < 3; k++) { for (l = 0; l < 3; l++) { //if (i%2==0){ wc11[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] = wc11_h[counter]; counter++; //} //else{ //wc13[i][j][CONV_SIZE - k - 1][CONV_SIZE - l - 1] =-0.003*(i%4); //} } } } } //printf("bias13\n"); //printf("Read dense block %d weights\n", 1); for (i = 0; i < 4096; i++) { for (j = 0; j < 4096; j++) { // fscanf(iin, "%f", &dval); if(i%2==0){ wd2[i][j] =0.003*(i%4); } else{ wd2[i][j] =-0.003*(i%4); } } } //printf("Read dense block %d weights\n", 2); for (i = 0; i < 4096; i++) { for (j = 0; j < 1000; j++) { //fscanf(iin, "%f", &dval); if(i%2==0){ wd3[i][j] =0.003*(i%4); } else{ wd3[i][j] =-0.003*(i%4); } } } } void read_image() { int i, j, l; int counter = 0; /* Reading image */ for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { for (l = 0; l < 3; l++) { image[l][i][j] = image_h[counter]; counter++; } } } } void normalize_image() { int i, j, l; //printf("normalize"); float coef[3] = { 103.939, 116.779, 123.68 }; for (l = 0; l < 3; l++) { for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { image[l][i][j] -= coef[l]; } } } } void convolution_3_x_3(float matrix[224][224], float kernel[3][3], int size, int ind) { int i, j; float sum; float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j + 2] * kernel[2][2]; mem_block1[ind][i][j] += sum; } } } void convolution_3_x_3_2(float matrix[224][224], float kernel[3][3], int size, int ind) { 
int i, j; float sum; float zeropad[SIZE + 2][SIZE + 2] = { 0.0 }; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { zeropad[i + 1][j + 1] = matrix[i][j]; } } for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { sum = zeropad[i][j] * kernel[0][0] + zeropad[i + 1][j] * kernel[1][0] + zeropad[i + 2][j] * kernel[2][0] + zeropad[i][j + 1] * kernel[0][1] + zeropad[i + 1][j + 1] * kernel[1][1] + zeropad[i + 2][j + 1] * kernel[2][1] + zeropad[i][j + 2] * kernel[0][2] + zeropad[i + 1][j + 2] * kernel[1][2] + zeropad[i + 2][j + 2] * kernel[2][2]; mem_block2[ind][i][j] += sum; } } } void add_bias_and_relu1(float bs, int size, int ind) { int i, j; //counter1=0; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { ////printf("%.12lf\n", out[i][j]); //counter1++; mem_block1[ind][i][j] += bs; if (mem_block1[ind][i][j] < 0) mem_block1[ind][i][j] = 0.0; } } ////printf("layer x %d\n",counter1); } void add_bias_and_relu2(float bs, int size, int ind) { int i, j; //counter1=0; for (i = 0; i < size; i++) { for (j = 0; j < size; j++) { ////printf("%.12lf\n", out[i][j]); //counter1++; mem_block2[ind][i][j] += bs; if (mem_block2[ind][i][j] < 0) mem_block2[ind][i][j] = 0.0; } } ////printf("layer x %d\n",counter1); } void add_bias_and_relu_flatten1(float bs[4096], int size, int relu) { int i; counter1 = 0; for (i = 0; i < size; i++) { mem_block2_dense[i] += bs[i]; counter1++; if (relu == 1) { if (mem_block2_dense[i] < 0) mem_block2_dense[i] = 0.0; } } //printf("flatt x %d\n", counter1); } void add_bias_and_relu_flatten2(float bs[4096], int size, int relu) { int i; counter1 = 0; for (i = 0; i < size; i++) { mem_block1_dense[i] += bs[i]; counter1++; if (relu == 1) { if (mem_block1_dense[i] < 0) mem_block1_dense[i] = 0.0; } } //printf("flatt x %d\n", counter1); } void add_bias_and_relu_flatten3(float bs[1000], int size, int relu) { int i; counter1 = 0; for (i = 0; i < size; i++) { mem_block2_dense[i] += bs[i]; counter1++; if (relu == 1) { if (mem_block2_dense[i] < 0) 
mem_block2_dense[i] = 0.0; } } //printf("flatt x %d\n", counter1); } float max_of_4(float a, float b, float c, float d) { if (a >= b && a >= c && a >= d) { return a; } if (b >= c && b >= d) { return b; } if (c >= d) { return c; } return d; } void maxpooling1(float out[224][224], int size, int ind) { int i, j; for (i = 0; i < size; i += 2) { for (j = 0; j < size; j += 2) { out[i / 2][j / 2] = max_of_4(out[i][j], out[i + 1][j], out[i][j + 1], out[i + 1][j + 1]); } } for (i = 0; i < 224; i++) for (j = 0; j < 224; j++) mem_block1[ind][i][j] = out[i][j]; } void maxpooling2(float out[224][224], int size, int ind) { int i, j; for (i = 0; i < size; i += 2) { for (j = 0; j < size; j += 2) { out[i / 2][j / 2] = max_of_4(out[i][j], out[i + 1][j], out[i][j + 1], out[i + 1][j + 1]); } } for(i =0; i<224;i++) for( j =0;j<224;j++) mem_block2[ind][i][j] = out[i][j]; } void flatten(float in[512][SIZE][SIZE], float* out, int sh0, int sh1, int sh2) { int i, j, k, total = 0; for (i = 0; i < sh0; i++) { for (j = 0; j < sh1; j++) { for (k = 0; k < sh2; k++) { out[total] = in[i][j][k]; total += 1; } } } for (i = 0; i < total; i++) mem_block1_dense[i] += out[i]; } void dense(float* in, float* out, int sh_in, int sh_out) { int i, j; //#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { if(i%2==0){ sum += in[j] *(0.003*(i%4)); } else{ sum += in[j] *(-0.003*(i%4)); } } out[i] = sum; } } void dense1(float* in, float weights[4096][4096], float* out, int sh_in, int sh_out) { int i, j; //#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < sh_out; i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } } void dense2(float* in, float weights[4096][1000], float* out, int sh_in, int sh_out) { int i, j; //#pragma omp parallel for private(j) schedule(dynamic,1) num_threads(numthreads) for (i = 0; i < sh_out; 
i++) { float sum = 0.0; for (j = 0; j < sh_in; j++) { sum += in[j] * weights[j][i]; } out[i] = sum; } } void softmax(int sh_out) { int i; float max_val, sum; int counter =0; int class[1000]; float result[1000]; max_val = mem_block2_dense[0]; for (i = 1; i < sh_out; i++) { if (mem_block2_dense[i] > max_val) max_val = mem_block2_dense[i]; } sum = 0.0; for (i = 0; i < sh_out; i++) { mem_block2_dense[i] = exp(mem_block2_dense[i] - max_val); sum += mem_block2_dense[i]; } for (i = 0; i < sh_out; i++) { mem_block2_dense[i] /= sum; } for (i = 0; i < 1000; i++) { class[i] = counter; result[i] = mem_block2_dense[i]; //printf("class: %d\n",class[i]); //printf("result: %f\n",result[i]); if (counter == 999) { counter = -1; } counter = counter + 1; } } void get_VGG16() { int i, j; int level, cur_size; // Init intermediate memory reset_mem_block1(); reset_mem_block2(); reset_mem_block_dense1(); reset_mem_block_dense2(); // Layer 1 (Convolution 3 -> 64) level = 0; cur_size = SIZE; for (i = 0; i < 64; i++) { for (j = 0; j < 3; j++) { convolution_3_x_3(image[j], wc1[i][j], cur_size,i); } add_bias_and_relu1(bc1[i], cur_size, i); } // Layer 2 (Convolution 64 -> 64) level = 1; for (i = 0; i < 64; i++) { for (j = 0; j < 64; j++) { convolution_3_x_3_2(mem_block1[j], wc2[i][j], cur_size,i); } add_bias_and_relu2(bc2[i], cur_size, i); } reset_mem_block1(); // Layer 3 (MaxPooling) for (i = 0; i < 64; i++) { maxpooling2(mem_block2[i], cur_size, i); } cur_size /= 2; // Layer 4 (Convolution 64 -> 128) level = 2; for (i = 0; i < 128; i++) { for (j = 0; j < 64; j++) { convolution_3_x_3(mem_block2[j], wc3[i][j], cur_size,i); } add_bias_and_relu1(bc3[i], cur_size, i); } reset_mem_block2(); // Layer 5 (Convolution 128 -> 128) level = 3; for (i = 0; i < 128; i++) { for (j = 0; j < 128; j++) { convolution_3_x_3_2(mem_block1[j], wc4[i][j], cur_size,i); } add_bias_and_relu2(bc4[i], cur_size, i); } reset_mem_block1(); // Layer 6 (MaxPooling) for (i = 0; i < 128; i++) { maxpooling2(mem_block2[i], 
cur_size, i); } cur_size /= 2; // Layer 7 (Convolution 128 -> 256) level = 4; for (i = 0; i < 256; i++) { for (j = 0; j < 128; j++) { convolution_3_x_3(mem_block2[j], wc5[i][j], cur_size,i); } add_bias_and_relu1(bc5[i], cur_size, i); } reset_mem_block2(); // Layer 8 (Convolution 256 -> 256) level = 5; for (i = 0; i < 256; i++) { for (j = 0; j < 256; j++) { convolution_3_x_3_2(mem_block1[j], wc6[i][j], cur_size,i); } add_bias_and_relu2(bc6[i], cur_size, i); } reset_mem_block1(); // Layer 9 (Convolution 256 -> 256) level = 6; for (i = 0; i < 256; i++) { for (j = 0; j < 256; j++) { convolution_3_x_3(mem_block2[j], wc7[i][j], cur_size,i); } add_bias_and_relu1(bc7[i], cur_size, i); } reset_mem_block2(); // Layer 10 (MaxPooling) for (i = 0; i < 256; i++) { maxpooling1(mem_block1[i], cur_size, i); } cur_size /= 2; // Layer 11 (Convolution 256 -> 512) level = 7; for (i = 0; i < 512; i++) { for (j = 0; j < 256; j++) { convolution_3_x_3_2(mem_block1[j], wc8[i][j], cur_size,i); } add_bias_and_relu2(bc8[i], cur_size, i); } reset_mem_block1(); // Layer 12 (Convolution 512 -> 512) level = 8; for (i = 0; i <512; i++) { for (j = 0; j < 512; j++) { convolution_3_x_3(mem_block2[j], wc9[i][j],cur_size,i); } add_bias_and_relu1(bc9[i], cur_size, i); } reset_mem_block2(); // Layer 13 (Convolution 512 -> 512) level = 9; for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { convolution_3_x_3_2(mem_block1[j], wc10[i][j], cur_size,i); } add_bias_and_relu2(bc10[i], cur_size, i); } reset_mem_block1(); // Layer 14 (MaxPooling) for (i = 0; i < 512; i++) { maxpooling2(mem_block2[i], cur_size, i); } cur_size /= 2; // Layer 15 (Convolution 512 -> 512) level = 10; for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { convolution_3_x_3(mem_block2[j], wc11[i][j], cur_size,i); } add_bias_and_relu1(bc11[i], cur_size, i); } reset_mem_block2(); // Layer 16 (Convolution 512 -> 512) level = 11; for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { convolution_3_x_3_2(mem_block1[j], wc10[i][j], 
cur_size,i); } add_bias_and_relu2(bc12[i], cur_size, i); } reset_mem_block1(); // Layer 17 (Convolution 512 -> 512) level = 12; for (i = 0; i < 512; i++) { for (j = 0; j < 512; j++) { convolution_3_x_3(mem_block2[j], wc11[i][j], cur_size,i); } add_bias_and_relu1(bc13[i], cur_size, i); } reset_mem_block2(); // Layer 18 (MaxPooling) for (i = 0; i < 512; i++) { maxpooling1(mem_block1[i], cur_size, i); } cur_size /= 2; // Layer 19 (Flatten) flatten(mem_block1, mem_block1_dense, 512, cur_size, cur_size); // Layer 20 (Dense) level = 0; XTime_GetTime(&tStart2); dense(mem_block1_dense, mem_block2_dense, 25088, 4096); add_bias_and_relu_flatten1(bd1, 4096, 1); reset_mem_block_dense1(mem_block1_dense); // Layer 21 (Dense) level = 1; dense1(mem_block2_dense, wd2, mem_block1_dense, 4096, 4096); XTime_GetTime(&tStart2); add_bias_and_relu_flatten2(bd2, 4096, 1); XTime_GetTime(&tEnd2); reset_mem_block_dense2(mem_block2_dense); // Layer 22 (Dense) level = 2; dense2(mem_block1_dense, wd3, mem_block2_dense,4096, 1000); double ElapsedTime1; double ElapsedTime2; XTime_GetTime(&tStart1); add_bias_and_relu_flatten3(bd3, 1000, 1); softmax(1000); XTime_GetTime(&tEnd1); ElapsedTime1 = 1.0*(tEnd1- tStart1)/(COUNTS_PER_SECOND); ElapsedTime2 = 1.0*(tEnd2- tStart2)/(COUNTS_PER_SECOND); printf("Add Bias and ReLU Flattan2 function Took %.4f seconds to execute \n", ElapsedTime2); printf("Add Bias and ReLU Flattan3-Softmax function Took %.4f \n", ElapsedTime1); //XTime_GetTime(&tStart2); //add_bias_and_relu_flatten2(bd2, 4096, 1); /* for (int i = 0; i<4096;i++){ memblock1_in[i]= mem_block1_dense[i]; /// send mem_block2_dense to ip } for (int i = 0; i<4096;i++){ bias_dense[i]= bd2[i]; /// send bias dense to ip } control_1[0] |=0x1; // start while((control_1[0]&0x2) != 0x2); // wait for done for (int i=0; i<4096;i++){ mem_block1_dense[i] = memblock1_out[i]; } */ /* for (int i = 0; i<1000;i++){ soft_input[i]= mem_block2_dense[i]; /// send mem_block2_dense to ip } for (int i = 0; i<1000;i++){ 
soft_parameter[i]= bd3[i]; /// send bias dense to ip } soft_control[0] |=0x1; // start while((soft_control[0]&0x2) != 0x2); // wait for done //XTime_GetTime(&tEnd1); // ElapsedTime1 = 1.0*(tEnd1- tStart1)/(COUNTS_PER_SECOND); // //printf("Took %.4f seconds to execute \n", ElapsedTime1); for (int i=0; i<1000;i++){ //printf("class: %d ",soft_output[i]); //printf("result: %f\n",soft_output1[i]); } */ return; } char *trimwhitespace(char *str) { char *end; // Trim leading space while (isspace((unsigned char)*str)) str++; if (*str == 0) // All spaces? return str; // Trim trailing space end = str + strlen(str) - 1; while (end > str && isspace((unsigned char)*end)) end--; // Write new null terminator *(end + 1) = 0; return str; } int main() { //xil_//printf("hata\n"); int lvls = -1; double ElapsedTime; //time_t timeStart, timeEnd; // double deltaTime; read_weights(lvls); ////printf("Reading weights: sec\n"); read_image(); normalize_image(); ////printf("Infer image sec\n"); //gettimeofday(&timeStart, NULL); XTime_GetTime(&tStart); get_VGG16(); XTime_GetTime(&tEnd); ElapsedTime = 1.0*(tEnd- tStart)/(COUNTS_PER_SECOND); //gettimeofday(&timeEnd, NULL); //deltaTime = get_seconds(timeStart, timeEnd); ////printf("Infer image %.3lf sec\n", deltaTime); /////printf("Took %.4f seconds to execute \n", ElapsedTime); ////printf("Infer image sec\n"); return 0; }
/* ===== file: picolrn.c ===== */
/* * This code is released under the MIT License. * Copyright (c) 2013 Nenad Markus */ #include <stdio.h> #include <malloc.h> #include <math.h> #include <stdint.h> #include <omp.h> // hyperparameters #define NRANDS 1024 /* auxiliary stuff */ #define MAX(a, b) ((a)>(b)?(a):(b)) #define MIN(a, b) ((a)<(b)?(a):(b)) #define SQR(x) ((x)*(x)) /* portable time function */ #ifdef __GNUC__ #include <time.h> float getticks() { struct timespec ts; if(clock_gettime(CLOCK_MONOTONIC, &ts) < 0) return -1.0f; return ts.tv_sec + 1e-9f*ts.tv_nsec; } #else #include <windows.h> float getticks() { static double freq = -1.0; LARGE_INTEGER lint; if(freq < 0.0) { if(!QueryPerformanceFrequency(&lint)) return -1.0f; freq = lint.QuadPart; } if(!QueryPerformanceCounter(&lint)) return -1.0f; return (float)( lint.QuadPart/freq ); } #endif /* multiply with carry PRNG */ uint32_t mwcrand_r(uint64_t* state) { uint32_t* m; // m = (uint32_t*)state; // bad state? if(m[0] == 0) m[0] = 0xAAAA; if(m[1] == 0) m[1] = 0xBBBB; // mutate state m[0] = 36969 * (m[0] & 65535) + (m[0] >> 16); m[1] = 18000 * (m[1] & 65535) + (m[1] >> 16); // output return (m[0] << 16) + m[1]; } uint64_t prngglobal = 0x12345678000fffffLL; void smwcrand(uint32_t seed) { prngglobal = 0x12345678000fffffLL*seed; } uint32_t mwcrand() { return mwcrand_r(&prngglobal); } /* */ #define MAX_N 2000000 int N = 0; uint8_t* ppixels[MAX_N]; int pdims[MAX_N][2]; // (nrows, ncols) int nbackground = 0; int background[MAX_N]; // i int nobjects = 0; int objects[MAX_N][4]; // (r, c, s, i) int load_image(uint8_t* pixels[], int* nrows, int* ncols, FILE* file) { /* - loads an 8-bit grey image saved in the <RID> file format - <RID> file contents: - a 32-bit signed integer h (image height) - a 32-bit signed integer w (image width) - an array of w*h unsigned bytes representing pixel intensities */ // if(fread(nrows, sizeof(int), 1, file) != 1) return 0; if(fread(ncols, sizeof(int), 1, file) != 1) return 0; // *pixels = 
(uint8_t*)malloc(*nrows**ncols*sizeof(uint8_t)); if(!*pixels) return 0; // read pixels if(fread(*pixels, sizeof(uint8_t), *nrows**ncols, file) != *nrows**ncols) return 0; // we're done return 1; } int load_training_data(char* path) { FILE* file; // file = fopen(path, "rb"); if(!file) return 0; // N = 0; nbackground = 0; nobjects = 0; while( load_image(&ppixels[N], &pdims[N][0], &pdims[N][1], file) ) { int i, n; // if(fread(&n, sizeof(int), 1, file) != 1) return 1; if(!n) { background[nbackground] = N; ++nbackground; } else { for(i=0; i<n; ++i) { fread(&objects[nobjects][0], sizeof(int), 1, file); // r fread(&objects[nobjects][1], sizeof(int), 1, file); // c fread(&objects[nobjects][2], sizeof(int), 1, file); // s objects[nobjects][3] = N; // i // ++nobjects; } } // ++N; } // return 1; } /* regression trees */ int bintest(int32_t tcode, int r, int c, int s, int iind) { // int r1, c1, r2, c2; int8_t* p = (int8_t*)&tcode; // r1 = (256*r + p[0]*s)/256; c1 = (256*c + p[1]*s)/256; r2 = (256*r + p[2]*s)/256; c2 = (256*c + p[3]*s)/256; // r1 = MIN(MAX(0, r1), pdims[iind][0]-1); c1 = MIN(MAX(0, c1), pdims[iind][1]-1); r2 = MIN(MAX(0, r2), pdims[iind][0]-1); c2 = MIN(MAX(0, c2), pdims[iind][1]-1); // return ppixels[iind][r1*pdims[iind][1]+c1]<=ppixels[iind][r2*pdims[iind][1]+c2]; } float get_split_error(int32_t tcode, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int inds[], int indsnum) { int i, j; double wsum, wsum0, wsum1; double wtvalsum0, wtvalsumsqr0, wtvalsum1, wtvalsumsqr1; double wmse0, wmse1; // wsum = wsum0 = wsum1 = wtvalsum0 = wtvalsum1 = wtvalsumsqr0 = wtvalsumsqr1 = 0.0; for(i=0; i<indsnum; ++i) { if( bintest(tcode, rs[inds[i]], cs[inds[i]], ss[inds[i]], iinds[inds[i]]) ) { wsum1 += ws[inds[i]]; wtvalsum1 += ws[inds[i]]*tvals[inds[i]]; wtvalsumsqr1 += ws[inds[i]]*SQR(tvals[inds[i]]); } else { wsum0 += ws[inds[i]]; wtvalsum0 += ws[inds[i]]*tvals[inds[i]]; wtvalsumsqr0 += ws[inds[i]]*SQR(tvals[inds[i]]); } wsum += ws[inds[i]]; } // wmse0 
= wtvalsumsqr0 - SQR(wtvalsum0)/wsum0; wmse1 = wtvalsumsqr1 - SQR(wtvalsum1)/wsum1; // return (float)( (wmse0 + wmse1)/wsum ); } int split_training_data(int32_t tcode, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int inds[], int ninds) { int stop; int i, j; int n0; // stop = 0; i = 0; j = ninds - 1; while(!stop) { // while( !bintest(tcode, rs[inds[i]], cs[inds[i]], ss[inds[i]], iinds[inds[i]]) ) { if( i==j ) break; else ++i; } while( bintest(tcode, rs[inds[j]], cs[inds[j]], ss[inds[j]], iinds[inds[j]]) ) { if( i==j ) break; else --j; } // if( i==j ) stop = 1; else { // swap inds[i] = inds[i] ^ inds[j]; inds[j] = inds[i] ^ inds[j]; inds[i] = inds[i] ^ inds[j]; } } // n0 = 0; for(i=0; i<ninds; ++i) if( !bintest(tcode, rs[inds[i]], cs[inds[i]], ss[inds[i]], iinds[inds[i]]) ) ++n0; // return n0; } int32_t get_random_tcode(int8_t* bbox) { int32_t tcode; int8_t* p; // p = (int8_t*)&tcode; // p[0] = bbox[0] + mwcrand()%(bbox[1]-bbox[0]+1); p[1] = bbox[2] + mwcrand()%(bbox[3]-bbox[2]+1); p[2] = bbox[0] + mwcrand()%(bbox[1]-bbox[0]+1); p[3] = bbox[2] + mwcrand()%(bbox[3]-bbox[2]+1); // return tcode; } int grow_subtree(int32_t tcodes[], float lut[], int nodeidx, int d, int maxd, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int inds[], int ninds, int8_t* bbox) { int i, nrands; int32_t tmptcodes[2048]; float es[2048], e; int n0; // if(d == maxd) { int lutidx; double tvalaccum, wsum; // lutidx = nodeidx - ((1<<maxd)-1); // compute output: a simple average tvalaccum = 0.0; wsum = 0.0; for(i=0; i<ninds; ++i) { tvalaccum += ws[inds[i]]*tvals[inds[i]]; wsum += ws[inds[i]]; } if(wsum == 0.0) lut[lutidx] = 0.0f; else lut[lutidx] = (float)( tvalaccum/wsum ); // return 1; } else if(ninds <= 1) { // tcodes[nodeidx] = 0; // grow_subtree(tcodes, lut, 2*nodeidx+1, d+1, maxd, tvals, rs, cs, ss, iinds, ws, inds, ninds, bbox); grow_subtree(tcodes, lut, 2*nodeidx+2, d+1, maxd, tvals, rs, cs, ss, iinds, ws, inds, ninds, bbox); return 1; } // 
generate binary test codes nrands = NRANDS; for(i=0; i<nrands; ++i) tmptcodes[i] = get_random_tcode(bbox); // #pragma omp parallel for for(i=0; i<nrands; ++i) es[i] = get_split_error(tmptcodes[i], tvals, rs, cs, ss, iinds, ws, inds, ninds); // e = es[0]; tcodes[nodeidx] = tmptcodes[0]; for(i=1; i<nrands; ++i) if(e > es[i]) { e = es[i]; tcodes[nodeidx] = tmptcodes[i]; } // n0 = split_training_data(tcodes[nodeidx], tvals, rs, cs, ss, iinds, ws, inds, ninds); // grow_subtree(tcodes, lut, 2*nodeidx+1, d+1, maxd, tvals, rs, cs, ss, iinds, ws, &inds[0], n0, bbox); grow_subtree(tcodes, lut, 2*nodeidx+2, d+1, maxd, tvals, rs, cs, ss, iinds, ws, &inds[n0], ninds-n0, bbox); // return 1; } int grow_rtree(int32_t tcodes[], float lut[], int d, float tvals[], int rs[], int cs[], int ss[], int iinds[], double ws[], int n, int8_t* bbox) { int i; int* inds; // inds = (int*)malloc(n*sizeof(int)); for(i=0; i<n; ++i) inds[i] = i; // if(!grow_subtree(tcodes, lut, 0, 0, d, tvals, rs, cs, ss, iinds, ws, inds, n, bbox)) { free(inds); return 0; } else { free(inds); return 1; } } /* */ int32_t version = 3; int tdepth; int ntrees=0; int8_t bbox[4]; // (r_min, r_max, c_min, c_max) int32_t tcodes[4096][1024]; float luts[4096][1024]; float thresholds[4096]; /* */ int load_cascade_from_file(const char* path) { int i; FILE* file; // file = fopen(path, "rb"); if(!file) return 0; // fread(&version, sizeof(int32_t), 1, file); fread(&bbox[0], sizeof(int8_t), 4, file); fread(&tdepth, sizeof(int), 1, file); fread(&ntrees, sizeof(int), 1, file); for(i=0; i<ntrees; ++i) { // fread(&tcodes[i][0], sizeof(int32_t), (1<<tdepth)-1, file); fread(&luts[i][0], sizeof(float), 1<<tdepth, file); fread(&thresholds[i], sizeof(float), 1, file); } // fclose(file); // return 1; } int save_cascade_to_file(const char* path) { int i; FILE* file; // file = fopen(path, "wb"); if(!file) return 0; // fwrite(&version, sizeof(int32_t), 1, file); fwrite(&bbox[0], sizeof(int8_t), 4, file); fwrite(&tdepth, sizeof(int), 1, file); 
fwrite(&ntrees, sizeof(int), 1, file); for(i=0; i<ntrees; ++i) { // fwrite(&tcodes[i][0], sizeof(int32_t), (1<<tdepth)-1, file); fwrite(&luts[i][0], sizeof(float), 1<<tdepth, file); fwrite(&thresholds[i], sizeof(float), 1, file); } // fclose(file); // return 1; } /* */ float get_tree_output(int i, int r, int c, int s, int iind) { int idx, j; // idx = 1; for(j=0; j<tdepth; ++j) idx = 2*idx + bintest(tcodes[i][idx-1], r, c, s, iind); // return luts[i][idx - (1<<tdepth)]; } int classify_region(float* o, int r, int c, int s, int iind) { int i, sr, sc; // *o = 0.0f; if(!ntrees) return 1; // for(i=0; i<ntrees; ++i) { // *o += get_tree_output(i, r, c, s, iind); // if(*o <= thresholds[i]) return -1; } // return 1; } int learn_new_stage(float mintpr, float maxfpr, int maxntrees, float tvals[], int rs[], int cs[], int ss[], int iinds[], float os[], int np, int nn) { int i; double* ws; double wsum; float threshold, tpr, fpr; // printf("* learning a new stage ...\n"); // ws = (double*)malloc((np+nn)*sizeof(double)); // maxntrees = ntrees + maxntrees; fpr = 1.0f; while(ntrees<maxntrees && fpr>maxfpr) { float t; int numtps, numfps; // t = getticks(); // compute weights ... wsum = 0.0; for(i=0; i<np+nn; ++i) { if(tvals[i] > 0) ws[i] = exp(-1.0*os[i])/np; else ws[i] = exp(+1.0*os[i])/nn; wsum += ws[i]; } for(i=0; i<np+nn; ++i) ws[i] /= wsum; // grow a tree ... grow_rtree(tcodes[ntrees], luts[ntrees], tdepth, tvals, rs, cs, ss, iinds, ws, np+nn, bbox); thresholds[ntrees] = -1337.0f; ++ntrees; // update outputs ... for(i=0; i<np+nn; ++i) { float o; // o = get_tree_output(ntrees-1, rs[i], cs[i], ss[i], iinds[i]); // os[i] += o; } // get threshold ... threshold = 5.0f; do { // threshold -= 0.005f; numtps = 0; numfps = 0; // for(i=0; i<np+nn; ++i) { if( tvals[i]>0 && os[i]>threshold) ++numtps; if( tvals[i]<0 && os[i]>threshold) ++numfps; } // tpr = numtps/(float)np; fpr = numfps/(float)nn; } while(tpr<mintpr); printf(" ** tree %d (%d [s]) ... 
stage tpr=%f, stage fpr=%f\n", ntrees, (int)(getticks()-t), tpr, fpr); fflush(stdout); } // thresholds[ntrees-1] = threshold; printf(" ** threshold set to %f\n", threshold); // free(ws); // return 1; } float sample_training_data(float tvals[], int rs[], int cs[], int ss[], int iinds[], float os[], int* np, int* nn) { int i, n; int64_t nw; float etpr, efpr; int t; #define NUMPRNGS 1024 static int prngsinitialized = 0; static uint64_t prngs[NUMPRNGS]; int stop; // t = getticks(); // n = 0; /* object samples */ for(i=0; i<nobjects; ++i) if( classify_region(&os[n], objects[i][0], objects[i][1], objects[i][2], objects[i][3]) == 1 ) { // rs[n] = objects[i][0]; cs[n] = objects[i][1]; ss[n] = objects[i][2]; iinds[n] = objects[i][3]; tvals[n] = +1; // ++n; } *np = n; /* non-object samples */ if(!prngsinitialized) { // initialize a PRNG for each thread for(i=0; i<NUMPRNGS; ++i) prngs[i] = 0xFFFF*mwcrand() + 0xFFFF1234FFFF0001LL*mwcrand(); // prngsinitialized = 1; } // nw = 0; *nn = 0; stop = 0; if(nbackground) { #pragma omp parallel { int thid; // thid = omp_get_thread_num(); while(!stop) { /* data mine hard negatives */ float o; int iind, s, r, c, nrows, ncols; uint8_t* pixels; // iind = background[ mwcrand_r(&prngs[thid])%nbackground ]; // r = mwcrand_r(&prngs[thid])%pdims[iind][0]; c = mwcrand_r(&prngs[thid])%pdims[iind][1]; s = objects[mwcrand_r(&prngs[thid])%nobjects][2]; // sample the size of a random object in the pool // if( classify_region(&o, r, c, s, iind) == 1 ) { //we have a false positive ... 
#pragma omp critical { if(*nn<*np) { rs[n] = r; cs[n] = c; ss[n] = s; iinds[n] = iind; os[n] = o; tvals[n] = -1; // ++n; ++*nn; } else stop = 1; } } if(!stop) { #pragma omp atomic ++nw; } } } } else nw = 1; /* print the estimated true positive and false positive rates */ etpr = *np/(float)nobjects; efpr = (float)( *nn/(double)nw ); printf("* sampling finished ...\n"); printf(" ** elapsed time: %d\n", (int)(getticks()-t)); printf(" ** cascade TPR=%.8f\n", etpr); printf(" ** cascade FPR=%.8f (%d/%lld)\n", efpr, *nn, (long long int)nw); /* */ return efpr; } /* */ static int rs[2*MAX_N]; static int cs[2*MAX_N]; static int ss[2*MAX_N]; static int iinds[2*MAX_N]; static float tvals[2*MAX_N]; static float os[2*MAX_N]; int learn_with_default_parameters(char* trdata, char* dst) { int i, np, nn; float fpr; // if(!load_training_data(trdata)) { printf("* cannot load training data ...\n"); return 0; } // bbox[0] = -127; bbox[1] = +127; bbox[2] = -127; bbox[3] = +127; tdepth = 5; if(!save_cascade_to_file(dst)) return 0; // sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn); learn_new_stage(0.9800f, 0.5f, 4, tvals, rs, cs, ss, iinds, os, np, nn); save_cascade_to_file(dst); printf("\n"); sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn); learn_new_stage(0.9850f, 0.5f, 8, tvals, rs, cs, ss, iinds, os, np, nn); save_cascade_to_file(dst); printf("\n"); sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn); learn_new_stage(0.9900f, 0.5f, 16, tvals, rs, cs, ss, iinds, os, np, nn); save_cascade_to_file(dst); printf("\n"); sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn); learn_new_stage(0.9950f, 0.5f, 32, tvals, rs, cs, ss, iinds, os, np, nn); save_cascade_to_file(dst); printf("\n"); // while(sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn) > 1e-6f) { learn_new_stage(0.9975f, 0.5f, 64, tvals, rs, cs, ss, iinds, os, np, nn); save_cascade_to_file(dst); printf("\n"); } // printf("* target FPR achieved ... 
terminating the learning process ...\n"); } /* */ const char* howto() { return "TODO\n" ; } int main(int argc, char* argv[]) { // initialize the PRNG smwcrand(time(0)); // if(argc == 3) { learn_with_default_parameters(argv[1], argv[2]); } else if(argc == 7) { int dummy; // sscanf(argv[1], "%d", &dummy); bbox[0] = dummy; sscanf(argv[2], "%d", &dummy); bbox[1] = dummy; sscanf(argv[3], "%d", &dummy); bbox[2] = dummy; sscanf(argv[4], "%d", &dummy); bbox[3] = dummy; // sscanf(argv[5], "%d", &tdepth); // ntrees = 0; // if(!save_cascade_to_file(argv[6])) return 0; // printf("* initializing:\n"); printf(" ** bbox = (%d, %d, %d, %d)\n", bbox[0], bbox[1], bbox[2], bbox[3]); printf(" ** tdepth = %d\n", tdepth); // return 0; } else if(argc == 7) { float tpr, fpr; int ntrees, np, nn; // if(!load_cascade_from_file(argv[1])) { printf("* cannot load a cascade from '%s'\n", argv[1]); return 1; } if(!load_training_data(argv[2])) { printf("* cannot load the training data from '%s'\n", argv[2]); return 1; } // sscanf(argv[3], "%f", &tpr); sscanf(argv[4], "%f", &fpr); sscanf(argv[5], "%d", &ntrees); // sample_training_data(tvals, rs, cs, ss, iinds, os, &np, &nn); learn_new_stage(tpr, fpr, ntrees, tvals, rs, cs, ss, iinds, os, np, nn); // if(!save_cascade_to_file(argv[6])) return 1; } else { printf("%s", howto()); return 0; } // return 0; }
/* ========================= munit.c ========================= */
#pragma GCC diagnostic ignored "-Wsign-conversion"
/* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE. */

/*** Configuration ***/

/* This is just where the output from the test goes.  It's really just
 * meant to let you choose stdout or stderr, but if anyone really want
 * to direct it to a file let me know, it would be fairly easy to
 * support. */
#if !defined(MUNIT_OUTPUT_FILE)
#  define MUNIT_OUTPUT_FILE stdout
#endif

/* This is a bit more useful; it tells µnit how to format the seconds in
 * timed tests.  If your tests run for longer you might want to reduce
 * it, and if your computer is really fast and your tests are tiny you
 * can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
#  define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif

/* If you have long test names you might want to consider bumping
 * this.  The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
#  define MUNIT_TEST_NAME_LEN 37
#endif

/* If you don't like the timing information, you can disable it by
 * defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
#  define MUNIT_ENABLE_TIMING
#endif

/*** End configuration ***/

#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
#  undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
#  define _POSIX_C_SOURCE 200809L
#endif

/* Solaris freaks out if you try to use a POSIX or SUS standard without
 * the "right" C standard. */
#if defined(_XOPEN_SOURCE)
#  undef _XOPEN_SOURCE
#endif

#if defined(__STDC_VERSION__)
#  if __STDC_VERSION__ >= 201112L
#    define _XOPEN_SOURCE 700
#  elif __STDC_VERSION__ >= 199901L
#    define _XOPEN_SOURCE 600
#  endif
#endif

/* Because, according to Microsoft, POSIX is deprecated.  You've got
 * to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
#  define _CRT_NONSTDC_NO_DEPRECATE
#endif

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
#  include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif

#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>

#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif

#if !defined(_WIN32)
#  include <unistd.h>
#  include <sys/types.h>
#  include <sys/wait.h>
#else
#  include <windows.h>
#  include <io.h>
#  include <fcntl.h>
#  if !defined(STDERR_FILENO)
#    define STDERR_FILENO _fileno(stderr)
#  endif
#endif

#include "munit.h"

#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)

/* Pick a thread-local-storage keyword for the compiler at hand. */
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
#  define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
#  define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
#  define MUNIT_THREAD_LOCAL __declspec(thread)
#endif

/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
 * while (0)', or 'do { ... } while (1)'.  I'm pretty sure nobody
 * at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif

#if defined(_WIN32) || defined(__EMSCRIPTEN__)
#  define MUNIT_NO_FORK
#endif

#if defined(__EMSCRIPTEN__)
#  define MUNIT_NO_BUFFER
#endif

/*** Logging ***/

/* Messages below munit_log_level_visible are suppressed; messages at or
 * above munit_log_level_fatal longjmp/abort (see munit_logf_ex below). */
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;

#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif

/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicity *not* set
 * because it will then choke on our attempts to use the MS-specific
 * I64 modifier for size_t (which we have to use since MSVC doesn't
 * support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif

/* Core log sink: prints "<Level>: [<file>:<line>: ]<message>\n" to fp.
 * Levels below munit_log_level_visible are dropped. */
MUNIT_PRINTF(5,0)
static void
munit_logf_exv(MunitLogLevel level, FILE* fp, const char* filename, int line, const char* format, va_list ap) {
  if (level < munit_log_level_visible)
    return;

  switch (level) {
    case MUNIT_LOG_DEBUG:
      fputs("Debug", fp);
      break;
    case MUNIT_LOG_INFO:
      fputs("Info", fp);
      break;
    case MUNIT_LOG_WARNING:
      fputs("Warning", fp);
      break;
    case MUNIT_LOG_ERROR:
      fputs("Error", fp);
      break;
    default:
      /* unknown level: report it as an error through the fatal-aware path */
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}

/* varargs convenience wrapper around munit_logf_exv (no file/line). */
MUNIT_PRINTF(3,4)
static void
munit_logf_internal(MunitLogLevel level, FILE* fp, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, fp, NULL, 0, format, ap);
  va_end(ap);
}

static void
munit_log_internal(MunitLogLevel level, FILE* fp, const char* message) {
  munit_logf_internal(level, fp, "%s", message);
}

/* Public logging entry point; at/above the fatal level it longjmps back
 * into the test harness (if a jmp_buf is armed) or aborts. */
void
munit_logf_ex(MunitLogLevel level, const char* filename, int line, const char* format, ...) {
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(level, stderr, filename, line, format, ap);
  va_end(ap);

  if (level >= munit_log_level_fatal) {
#if defined(MUNIT_THREAD_LOCAL)
    if (munit_error_jmp_buf_valid)
      longjmp(munit_error_jmp_buf, 1);
#endif
    abort();
  }
}

void
munit_errorf_ex(const char* filename, int line, const char* format, ...)
{
  va_list ap;

  va_start(ap, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, ap);
  va_end(ap);

  /* errors are always fatal: jump back to the harness, or abort */
#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}

#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif

#if !defined(MUNIT_STRERROR_LEN)
#  define MUNIT_STRERROR_LEN 80
#endif

/* Log `msg` together with strerror(errno); uses the reentrant
 * strerror_r/strerror_s where available. */
static void
munit_log_errno(MunitLogLevel level, FILE* fp, const char* msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
  munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
  char munit_error_str[MUNIT_STRERROR_LEN];
  munit_error_str[0] = '\0';

#if !defined(_WIN32)
  strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
  strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif

  munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}

/*** Memory allocation ***/

/* Zero-initialized allocation; logs (fatally, via munit_logf_ex) on
 * failure. Returns NULL for size == 0. */
void*
munit_malloc_ex(const char* filename, int line, size_t size) {
  void* ptr;

  if (size == 0)
    return NULL;

  ptr = calloc(1, size);
  if (MUNIT_UNLIKELY(ptr == NULL)) {
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }

  return ptr;
}

/*** Timer code ***/

#if defined(MUNIT_ENABLE_TIMING)

#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t

/* Code copied from portable-snippets
 * <https://github.com/nemequ/portable-snippets/>.  If you need to
 * change something, please do it there so we can keep the code in
 * sync. */

/* Clocks (v1)
 * Portable Snippets - https://gitub.com/nemequ/portable-snippets
 * Created by Evan Nemerson <evan@nemerson.com>
 *
 * To the extent possible under law, the authors have waived all
 * copyright and related or neighboring rights to this code.  For
 * details, see the Creative Commons Zero 1.0 Universal license at
 * https://creativecommons.org/publicdomain/zero/1.0/
 */

#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H

#if !defined(psnip_uint64_t)
#  include "../exact-int/exact-int.h"
#endif

#if !defined(PSNIP_CLOCK_STATIC_INLINE)
#  if defined(__GNUC__)
#    define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
#  else
#    define PSNIP_CLOCK__COMPILER_ATTRIBUTES
#  endif

#  define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif

enum PsnipClockType {
  /* This clock provides the current time, in units since 1970-01-01
   * 00:00:00 UTC not including leap seconds.  In other words, UNIX
   * time.  Keep in mind that this clock doesn't account for leap
   * seconds, and can go backwards (think NTP adjustments). */
  PSNIP_CLOCK_TYPE_WALL = 1,
  /* The CPU time is a clock which increases only when the current
   * process is active (i.e., it doesn't increment while blocking on
   * I/O). */
  PSNIP_CLOCK_TYPE_CPU = 2,
  /* Monotonic time is always running (unlike CPU time), but it only
     ever moves forward unless you reboot the system.  Things like NTP
     adjustments have no effect on this clock. */
  PSNIP_CLOCK_TYPE_MONOTONIC = 3
};

struct PsnipClockTimespec {
  psnip_uint64_t seconds;
  psnip_uint64_t nanoseconds;
};

/* Methods we support: */

#define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1
#define PSNIP_CLOCK_METHOD_TIME 2
#define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3
#define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4
#define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5
#define PSNIP_CLOCK_METHOD_CLOCK 6
#define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7
#define PSNIP_CLOCK_METHOD_GETRUSAGE 8
#define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9
#define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10

#include <assert.h>

#if defined(HEDLEY_UNREACHABLE)
#  define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE()
#else
#  define PSNIP_CLOCK_UNREACHABLE() assert(0)
#endif

/* Choose an implementation */

/* #undef PSNIP_CLOCK_WALL_METHOD */
/* #undef PSNIP_CLOCK_CPU_METHOD */
/* #undef PSNIP_CLOCK_MONOTONIC_METHOD */

/* We want to be able to detect the libc implementation, so we include
   <limits.h> (<features.h> isn't available everywhere). */

#if defined(__unix__) || defined(__unix) || defined(__linux__)
#  include <limits.h>
#  include <unistd.h>
#endif

#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)

/* These are known to work without librt.  If you know of others
 * please let us know so we can add them. */
#  if \
  (defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \
  (defined(__FreeBSD__))
#    define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
#  elif !defined(PSNIP_CLOCK_NO_LIBRT)
#    define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
#  endif

#endif

#if defined(_WIN32)
#  if !defined(PSNIP_CLOCK_CPU_METHOD)
#    define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES
#  endif
#  if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
#    define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
#  endif
#endif

#if defined(__MACH__) && !defined(__gnu_hurd__)
#  if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
#    define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
#  endif
#endif

#if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME)
#  include <time.h>
#  if !defined(PSNIP_CLOCK_WALL_METHOD)
#    if defined(CLOCK_REALTIME_PRECISE)
#      define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE
#    elif !defined(__sun)
#      define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME
#    endif
#  endif
#  if !defined(PSNIP_CLOCK_CPU_METHOD)
#    if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
#      define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID
#    elif defined(CLOCK_VIRTUAL)
#      define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL
#    endif
#  endif
#  if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
#    if defined(CLOCK_MONOTONIC_RAW)
#      define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
#    elif defined(CLOCK_MONOTONIC_PRECISE)
#      define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE
#    elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC)
#      define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
#      define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
#    endif
#  endif
#endif

#if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L)
#  if !defined(PSNIP_CLOCK_WALL_METHOD)
#    define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY
#  endif
#endif

#if !defined(PSNIP_CLOCK_WALL_METHOD)
#  define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME
#endif

#if !defined(PSNIP_CLOCK_CPU_METHOD)
#  define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK
#endif

/* Primarily here for testing. */
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC)
#  error No monotonic clock found.
#endif

/* Implementations */

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME))
#  include <time.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY))
#  include <sys/time.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64))
#  include <windows.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE))
#  include <sys/time.h>
#  include <sys/resource.h>
#endif

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME))
#  include <CoreServices/CoreServices.h>
#  include <mach/mach.h>
#  include <mach/mach_time.h>
#endif

/*** Implementations ***/

#define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL))

#if \
  (defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
  (defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME))
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock__clock_getres
(clockid_t clk_id) { struct timespec res; int r; r = clock_getres(clk_id, &res); if (r != 0) return 0; return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res.tv_nsec); } PSNIP_CLOCK__FUNCTION int psnip_clock__clock_gettime (clockid_t clk_id, struct PsnipClockTimespec* res) { struct timespec ts; if (clock_gettime(clk_id, &ts) != 0) return -10; res->seconds = (psnip_uint64_t) (ts.tv_sec); res->nanoseconds = (psnip_uint64_t) (ts.tv_nsec); return 0; } #endif PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_wall_get_precision (void) { #if !defined(PSNIP_CLOCK_WALL_METHOD) return 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY return 1000000; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME return 1; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_wall_get_time (struct PsnipClockTimespec* res) { (void) res; #if !defined(PSNIP_CLOCK_WALL_METHOD) return -2; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res); #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME res->seconds = time(NULL); res->nanoseconds = 0; #elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY struct timeval tv; if (gettimeofday(&tv, NULL) != 0) return -6; res->seconds = tv.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_cpu_get_precision (void) { #if !defined(PSNIP_CLOCK_CPU_METHOD) return 0; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return 
psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK return CLOCKS_PER_SEC; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES return PSNIP_CLOCK_NSEC_PER_SEC / 100; #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_cpu_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_CPU_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK clock_t t = clock(); if (t == ((clock_t) -1)) return -5; res->seconds = t / CLOCKS_PER_SEC; res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC); #elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES FILETIME CreationTime, ExitTime, KernelTime, UserTime; LARGE_INTEGER date, adjust; if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime)) return -7; /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */ date.HighPart = UserTime.dwHighDateTime; date.LowPart = UserTime.dwLowDateTime; adjust.QuadPart = 11644473600000 * 10000; date.QuadPart -= adjust.QuadPart; res->seconds = date.QuadPart / 10000000; res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100); #elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE struct rusage usage; if (getrusage(RUSAGE_SELF, &usage) != 0) return -8; res->seconds = usage.ru_utime.tv_sec; res->nanoseconds = tv.tv_usec * 1000; #else (void) res; return -2; #endif return 0; } PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_monotonic_get_precision (void) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) return 0; #elif 
defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); return (psnip_uint32_t) (tbi.numer / tbi.denom); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 return 1000; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER Frequency; QueryPerformanceFrequency(&Frequency); return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart); #else return 0; #endif } PSNIP_CLOCK__FUNCTION int psnip_clock_monotonic_get_time (struct PsnipClockTimespec* res) { #if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) (void) res; return -2; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res); #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME psnip_uint64_t nsec = mach_absolute_time(); static mach_timebase_info_data_t tbi = { 0, }; if (tbi.denom == 0) mach_timebase_info(&tbi); nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom); res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC; res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER LARGE_INTEGER t, f; if (QueryPerformanceCounter(&t) == 0) return -12; QueryPerformanceFrequency(&f); res->seconds = t.QuadPart / f.QuadPart; res->nanoseconds = t.QuadPart % f.QuadPart; if (f.QuadPart > 
PSNIP_CLOCK_NSEC_PER_SEC) res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC; else res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart; #elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64 const ULONGLONG msec = GetTickCount64(); res->seconds = msec / 1000; res->nanoseconds = sec % 1000; #else return -2; #endif return 0; } /* Returns the number of ticks per second for the specified clock. * For example, a clock with millisecond precision would return 1000, * and a clock with 1 second (such as the time() function) would * return 1. * * If the requested clock isn't available, it will return 0. * Hopefully this will be rare, but if it happens to you please let us * know so we can work on finding a way to support your system. * * Note that different clocks on the same system often have a * different precisions. */ PSNIP_CLOCK__FUNCTION psnip_uint32_t psnip_clock_get_precision (enum PsnipClockType clock_type) { switch (clock_type) { case PSNIP_CLOCK_TYPE_MONOTONIC: return psnip_clock_monotonic_get_precision (); case PSNIP_CLOCK_TYPE_CPU: return psnip_clock_cpu_get_precision (); case PSNIP_CLOCK_TYPE_WALL: return psnip_clock_wall_get_precision (); } PSNIP_CLOCK_UNREACHABLE(); return 0; } /* Set the provided timespec to the requested time. Returns 0 on * success, or a negative value on failure. 
 */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time (enum PsnipClockType clock_type, struct PsnipClockTimespec* res) {
  assert(res != NULL);

  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time (res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time (res);
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time (res);
  }

  return -1;
}

#endif /* !defined(PSNIP_CLOCK_H) */

/* Elapsed nanoseconds between two timespecs (end >= start assumed). */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec* start, struct PsnipClockTimespec* end) {
  psnip_uint64_t r = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  if (end->nanoseconds < start->nanoseconds) {
    r -= (start->nanoseconds - end->nanoseconds);
  } else {
    r += (end->nanoseconds - start->nanoseconds);
  }
  return r;
}

#else
#  include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */

/*** PRNG stuff ***/

/* This is (unless I screwed up, which is entirely possible) the
 * version of PCG with 32-bit state.  It was chosen because it has a
 * small enough state that we should reliably be able to use CAS
 * instead of requiring a lock for thread-safety.
 *
 * If I did screw up, I probably will not bother changing it unless
 * there is a significant bias.  It's really not important this be
 * particularly strong, as long as it is fairly random it's much more
 * important that it be reproducible, so bug reports have a better
 * chance of being reproducible. */

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8))
#  define HAVE_STDATOMIC
#elif defined(__clang__)
#  if __has_extension(c_atomic)
#    define HAVE_CLANG_ATOMICS
#  endif
#endif

/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
#  undef HAVE_STDATOMIC
#  if defined(__c2__)
#    undef HAVE_CLANG_ATOMICS
#  endif
#endif

#if defined(_OPENMP)
#  define ATOMIC_UINT32_T uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
#  include <stdatomic.h>
#  define ATOMIC_UINT32_T _Atomic uint32_t
#  define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
#  define ATOMIC_UINT32_T _Atomic uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
#  define ATOMIC_UINT32_T volatile LONG
#  define ATOMIC_UINT32_INIT(x) (x)
#else
#  define ATOMIC_UINT32_T volatile uint32_t
#  define ATOMIC_UINT32_INIT(x) (x)
#endif

/* Global PRNG state, updated atomically (or under an OpenMP critical). */
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);

#if defined(_OPENMP)
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
  *dest = value;
}

static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  int ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}

static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  munit_bool ret;

#pragma omp critical (munit_atomics)
  {
    if (*dest == *expected) {
      *dest = desired;
      ret = 1;
    } else {
      ret = 0;
    }
  }

  return ret;
}
#elif defined(HAVE_STDATOMIC)
#  define munit_atomic_store(dest, value) atomic_store(dest, value)
#  define munit_atomic_load(src) atomic_load(src)
#  define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
#  define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
/* NOTE(review): the condition below reads `defined(__GNUC__) && A || B`;
 * precedence makes it (defined(__GNUC__) && A) || B, which appears to work
 * because __GNUC__ expands to 0 when undefined — confirm against upstream. */
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#  define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
#  define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
#  define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
#  define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
#  define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
#  warning No atomic implementation, PRNG will not be thread-safe
#  define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
#  define munit_atomic_load(src) (*(src))
static inline munit_bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  if (*dest == *expected) {
    *dest = desired;
    return 1;
  } else {
    return 0;
  }
}
#endif

#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)

/* LCG state advance (PCG-style: output is derived separately below). */
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  return state * MUNIT_PRNG_MULTIPLIER + MUNIT_PRNG_INCREMENT;
}

/* PCG output permutation: xorshift + multiply over the raw state. */
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  munit_uint32_t res = ((state >> ((state >> 28) + 4)) ^ state) * (277803737U);
  res ^= res >> 22;
  return res;
}

void
munit_rand_seed(munit_uint32_t seed) {
  munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}

/* Derive a seed from the wall clock (nanoseconds when timing is enabled). */
static munit_uint32_t
munit_rand_generate_seed(void) {
  munit_uint32_t seed, state;

#if defined(MUNIT_ENABLE_TIMING)
  struct PsnipClockTimespec wc = { 0, };
  psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
  seed = (munit_uint32_t) wc.nanoseconds;
#else
  seed = (munit_uint32_t) time(NULL);
#endif

  state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  return munit_rand_from_state(state);
}

/* Non-atomic variant operating on a caller-owned state. */
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t* state) {
  const munit_uint32_t old = *state;
  *state = munit_rand_next_state(old);
  return munit_rand_from_state(old);
}

/* Thread-safe draw from the global state via CAS retry loop. */
munit_uint32_t
munit_rand_uint32(void) {
  munit_uint32_t old, state;

  do {
    old = munit_atomic_load(&munit_rand_state);
    state = munit_rand_next_state(old);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));

  return munit_rand_from_state(old);
}

/* Fill `data` with `size` random bytes, 4 bytes per PRNG draw. */
static void
munit_rand_state_memory(munit_uint32_t* state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  size_t members_remaining = size / sizeof(munit_uint32_t);
  size_t bytes_remaining = size % sizeof(munit_uint32_t);
  munit_uint8_t* b = data;
  munit_uint32_t rv;
  while (members_remaining-- > 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, sizeof(munit_uint32_t));
    b += sizeof(munit_uint32_t);
  }
  if (bytes_remaining != 0) {
    rv = munit_rand_state_uint32(state);
    memcpy(b, &rv, bytes_remaining);
  }
}

void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  munit_uint32_t old, state;

  do {
    state = old = munit_atomic_load(&munit_rand_state);
    munit_rand_state_memory(&state, size, data);
  } while (!munit_atomic_cas(&munit_rand_state, &old, state));
}

static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t* state, munit_uint32_t salt, munit_uint32_t max) {
  /* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
   * as (UINT32_MAX + 1 - max)
% max = -max % max. We compute -max using not * to avoid compiler warnings. */ const munit_uint32_t min = (~max + 1U) % max; munit_uint32_t x; if (max == (~((munit_uint32_t) 0U))) return munit_rand_state_uint32(state) ^ salt; max++; do { x = munit_rand_state_uint32(state) ^ salt; } while (x < min); return x % max; } static munit_uint32_t munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) { munit_uint32_t old, state; munit_uint32_t retval; do { state = old = munit_atomic_load(&munit_rand_state); retval = munit_rand_state_at_most(&state, salt, max); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } int munit_rand_int_range(int min, int max) { munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min; if (min > max) return munit_rand_int_range(max, min); if (range > (~((munit_uint32_t) 0U))) range = (~((munit_uint32_t) 0U)); return min + munit_rand_at_most(0, (munit_uint32_t) range); } double munit_rand_double(void) { munit_uint32_t old, state; double retval = 0.0; do { state = old = munit_atomic_load(&munit_rand_state); /* See http://mumble.net/~campbell/tmp/random_real.c for how to do * this right. Patches welcome if you feel that this is too * biased. 
*/ retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0); } while (!munit_atomic_cas(&munit_rand_state, &old, state)); return retval; } /*** Test suite handling ***/ typedef struct { unsigned int successful; unsigned int skipped; unsigned int failed; unsigned int errored; #if defined(MUNIT_ENABLE_TIMING) munit_uint64_t cpu_clock; munit_uint64_t wall_clock; #endif } MunitReport; typedef struct { const char* prefix; const MunitSuite* suite; const char** tests; munit_uint32_t seed; unsigned int iterations; MunitParameter* parameters; munit_bool single_parameter_mode; void* user_data; MunitReport report; munit_bool colorize; munit_bool fork; munit_bool show_stderr; munit_bool fatal_failures; } MunitTestRunner; const char* munit_parameters_get(const MunitParameter params[], const char* key) { const MunitParameter* param; for (param = params ; param != NULL && param->name != NULL ; param++) if (strcmp(param->name, key) == 0) return param->value; return NULL; } #if defined(MUNIT_ENABLE_TIMING) static void munit_print_time(FILE* fp, munit_uint64_t nanoseconds) { fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC)); } #endif /* Add a paramter to an array of parameters. */ static MunitResult munit_parameters_add(size_t* params_size, MunitParameter* params[MUNIT_ARRAY_PARAM(*params_size)], char* name, char* value) { *params = realloc(*params, sizeof(MunitParameter) * (*params_size + 2)); if (*params == NULL) return MUNIT_ERROR; (*params)[*params_size].name = name; (*params)[*params_size].value = value; (*params_size)++; (*params)[*params_size].name = NULL; (*params)[*params_size].value = NULL; return MUNIT_OK; } /* Concatenate two strings, but just return one of the components * unaltered if the other is NULL or "". */ static char* munit_maybe_concat(size_t* len, char* prefix, char* suffix) { char* res; size_t res_l; const size_t prefix_l = prefix != NULL ? 
strlen(prefix) : 0; const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0; if (prefix_l == 0 && suffix_l == 0) { res = NULL; res_l = 0; } else if (prefix_l == 0 && suffix_l != 0) { res = suffix; res_l = suffix_l; } else if (prefix_l != 0 && suffix_l == 0) { res = prefix; res_l = prefix_l; } else { res_l = prefix_l + suffix_l; res = malloc(res_l + 1); memcpy(res, prefix, prefix_l); memcpy(res + prefix_l, suffix, suffix_l); res[res_l] = 0; } if (len != NULL) *len = res_l; return res; } /* Possbily free a string returned by munit_maybe_concat. */ static void munit_maybe_free_concat(char* s, const char* prefix, const char* suffix) { if (prefix != s && suffix != s) free(s); } /* Cheap string hash function, just used to salt the PRNG. */ static munit_uint32_t munit_str_hash(const char* name) { const char *p; munit_uint32_t h = 5381U; for (p = name; *p != '\0'; p++) h = (h << 5) + h + *p; return h; } static void munit_splice(int from, int to) { munit_uint8_t buf[1024]; #if !defined(_WIN32) ssize_t len; ssize_t bytes_written; ssize_t write_res; #else int len; int bytes_written; int write_res; #endif do { len = read(from, buf, sizeof(buf)); if (len > 0) { bytes_written = 0; do { write_res = write(to, buf + bytes_written, len - bytes_written); if (write_res < 0) break; bytes_written += write_res; } while (bytes_written < len); } else break; } while (1); } /* This is the part that should be handled in the child process */ static MunitResult munit_test_runner_exec(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[], MunitReport* report) { unsigned int iterations = runner->iterations; MunitResult result = MUNIT_FAIL; #if defined(MUNIT_ENABLE_TIMING) struct PsnipClockTimespec wall_clock_begin = { 0, }, wall_clock_end = { 0, }; struct PsnipClockTimespec cpu_clock_begin = { 0, }, cpu_clock_end = { 0, }; #endif unsigned int i = 0; if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION) iterations = 1; else if 
(iterations == 0) iterations = runner->suite->iterations; munit_rand_seed(runner->seed); do { void* data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin); #endif result = test->test(params, data); #if defined(MUNIT_ENABLE_TIMING) psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end); psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end); #endif if (test->tear_down != NULL) test->tear_down(data); if (MUNIT_LIKELY(result == MUNIT_OK)) { report->successful++; #if defined(MUNIT_ENABLE_TIMING) report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end); report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end); #endif } else { switch ((int) result) { case MUNIT_SKIP: report->skipped++; break; case MUNIT_FAIL: report->failed++; break; case MUNIT_ERROR: report->errored++; break; default: break; } break; } } while (++i < iterations); return result; } #if defined(MUNIT_EMOTICON) # define MUNIT_RESULT_STRING_OK ":)" # define MUNIT_RESULT_STRING_SKIP ":|" # define MUNIT_RESULT_STRING_FAIL ":(" # define MUNIT_RESULT_STRING_ERROR ":o" # define MUNIT_RESULT_STRING_TODO ":/" #else # define MUNIT_RESULT_STRING_OK "OK " # define MUNIT_RESULT_STRING_SKIP "SKIP " # define MUNIT_RESULT_STRING_FAIL "FAIL " # define MUNIT_RESULT_STRING_ERROR "ERROR" # define MUNIT_RESULT_STRING_TODO "TODO " #endif static void munit_test_runner_print_color(const MunitTestRunner* runner, const char* string, char color) { if (runner->colorize) fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string); else fputs(string, MUNIT_OUTPUT_FILE); } #if !defined(MUNIT_NO_BUFFER) static int munit_replace_stderr(FILE* stderr_buf) { if (stderr_buf != NULL) { const int orig_stderr = dup(STDERR_FILENO); int errfd = fileno(stderr_buf); if (MUNIT_UNLIKELY(errfd == -1)) 
{ exit(EXIT_FAILURE); } dup2(errfd, STDERR_FILENO); return orig_stderr; } return -1; } static void munit_restore_stderr(int orig_stderr) { if (orig_stderr != -1) { dup2(orig_stderr, STDERR_FILENO); close(orig_stderr); } } #endif /* !defined(MUNIT_NO_BUFFER) */ /* Run a test with the specified parameters. */ static void munit_test_runner_run_test_with_params(MunitTestRunner* runner, const MunitTest* test, const MunitParameter params[]) { MunitResult result = MUNIT_OK; MunitReport report = { 0, 0, 0, 0, #if defined(MUNIT_ENABLE_TIMING) 0, 0 #endif }; unsigned int output_l; munit_bool first; const MunitParameter* param; FILE* stderr_buf; #if !defined(MUNIT_NO_FORK) int pipefd[2]; pid_t fork_pid; int orig_stderr; ssize_t bytes_written = 0; ssize_t write_res; ssize_t bytes_read = 0; ssize_t read_res; int status = 0; pid_t changed_pid; #endif if (params != NULL) { output_l = 2; fputs(" ", MUNIT_OUTPUT_FILE); first = 1; for (param = params ; param != NULL && param->name != NULL ; param++) { if (!first) { fputs(", ", MUNIT_OUTPUT_FILE); output_l += 2; } else { first = 0; } output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value); } while (output_l++ < MUNIT_TEST_NAME_LEN) { fputc(' ', MUNIT_OUTPUT_FILE); } } fflush(MUNIT_OUTPUT_FILE); stderr_buf = NULL; #if !defined(_WIN32) || defined(__MINGW32__) stderr_buf = tmpfile(); #else tmpfile_s(&stderr_buf); #endif if (stderr_buf == NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr"); result = MUNIT_ERROR; goto print_result; } #if !defined(MUNIT_NO_FORK) if (runner->fork) { pipefd[0] = -1; pipefd[1] = -1; if (pipe(pipefd) != 0) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe"); result = MUNIT_ERROR; goto print_result; } fork_pid = fork(); if (fork_pid == 0) { close(pipefd[0]); orig_stderr = munit_replace_stderr(stderr_buf); munit_test_runner_exec(runner, test, params, &report); /* Note that we don't restore stderr. 
This is so we can buffer * things written to stderr later on (such as by * asan/tsan/ubsan, valgrind, etc.) */ close(orig_stderr); do { write_res = write(pipefd[1], ((munit_uint8_t*) (&report)) + bytes_written, sizeof(report) - bytes_written); if (write_res < 0) { if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe"); } exit(EXIT_FAILURE); } bytes_written += write_res; } while ((size_t) bytes_written < sizeof(report)); if (stderr_buf != NULL) fclose(stderr_buf); close(pipefd[1]); exit(EXIT_SUCCESS); } else if (fork_pid == -1) { close(pipefd[0]); close(pipefd[1]); if (stderr_buf != NULL) { munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork"); } report.errored++; result = MUNIT_ERROR; } else { close(pipefd[1]); do { read_res = read(pipefd[0], ((munit_uint8_t*) (&report)) + bytes_read, sizeof(report) - bytes_read); if (read_res < 1) break; bytes_read += read_res; } while (bytes_read < (ssize_t) sizeof(report)); changed_pid = waitpid(fork_pid, &status, 0); if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) { if (bytes_read != sizeof(report)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited unexpectedly with status %d", WEXITSTATUS(status)); report.errored++; } else if (WEXITSTATUS(status) != EXIT_SUCCESS) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status)); report.errored++; } } else { if (WIFSIGNALED(status)) { #if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700) munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d (%s)", WTERMSIG(status), strsignal(WTERMSIG(status))); #else munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status)); #endif } else if (WIFSTOPPED(status)) { munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status)); } report.errored++; } close(pipefd[0]); waitpid(fork_pid, NULL, 0); } } else #endif { #if 
!defined(MUNIT_NO_BUFFER) const volatile int orig_stderr = munit_replace_stderr(stderr_buf); #endif #if defined(MUNIT_THREAD_LOCAL) if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) { result = MUNIT_FAIL; report.failed++; } else { munit_error_jmp_buf_valid = 1; result = munit_test_runner_exec(runner, test, params, &report); } #else result = munit_test_runner_exec(runner, test, params, &report); #endif #if !defined(MUNIT_NO_BUFFER) munit_restore_stderr(orig_stderr); #endif /* Here just so that the label is used on Windows and we don't get * a warning */ goto print_result; } print_result: fputs("[ ", MUNIT_OUTPUT_FILE); if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) { if (report.failed != 0 || report.errored != 0 || report.skipped != 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3'); result = MUNIT_OK; } else { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); if (MUNIT_LIKELY(stderr_buf != NULL)) munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful."); runner->report.failed++; result = MUNIT_ERROR; } } else if (report.failed > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1'); runner->report.failed++; result = MUNIT_FAIL; } else if (report.errored > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1'); runner->report.errored++; result = MUNIT_ERROR; } else if (report.skipped > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3'); runner->report.skipped++; result = MUNIT_SKIP; } else if (report.successful > 1) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful); fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" 
MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", ""); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } else if (report.successful > 0) { munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2'); #if defined(MUNIT_ENABLE_TIMING) fputs(" ] [ ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock); fputs(" / ", MUNIT_OUTPUT_FILE); munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock); fputs(" CPU", MUNIT_OUTPUT_FILE); #endif runner->report.successful++; result = MUNIT_OK; } fputs(" ]\n", MUNIT_OUTPUT_FILE); if (stderr_buf != NULL) { if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) { fflush(MUNIT_OUTPUT_FILE); rewind(stderr_buf); munit_splice(fileno(stderr_buf), STDERR_FILENO); fflush(stderr); } fclose(stderr_buf); } } static void munit_test_runner_run_test_wild(MunitTestRunner* runner, const MunitTest* test, const char* test_name, MunitParameter* params, MunitParameter* p) { const MunitParameterEnum* pe; char** values; MunitParameter* next; for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { if (p->name == pe->name) break; } if (pe == NULL) return; for (values = pe->values ; *values != NULL ; values++) { next = p + 1; p->value = *values; if (next->name == NULL) { munit_test_runner_run_test_with_params(runner, test, params); } else { munit_test_runner_run_test_wild(runner, test, test_name, params, next); } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) break; } } /* Run a single test, with every combination of parameters * requested. 
*/ static void munit_test_runner_run_test(MunitTestRunner* runner, const MunitTest* test, const char* prefix) { char* test_name = munit_maybe_concat(NULL, (char*) prefix, (char*) test->name); /* The array of parameters to pass to * munit_test_runner_run_test_with_params */ MunitParameter* params = NULL; size_t params_l = 0; /* Wildcard parameters are parameters which have possible values * specified in the test, but no specific value was passed to the * CLI. That means we want to run the test once for every * possible combination of parameter values or, if --single was * passed to the CLI, a single time with a random set of * parameters. */ MunitParameter* wild_params = NULL; size_t wild_params_l = 0; const MunitParameterEnum* pe; const MunitParameter* cli_p; munit_bool filled; unsigned int possible; char** vals; size_t first_wild; const MunitParameter* wp; int pidx; munit_rand_seed(runner->seed); fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name); if (test->parameters == NULL) { /* No parameters. Simple, nice. */ munit_test_runner_run_test_with_params(runner, test, NULL); } else { fputc('\n', MUNIT_OUTPUT_FILE); for (pe = test->parameters ; pe != NULL && pe->name != NULL ; pe++) { /* Did we received a value for this parameter from the CLI? */ filled = 0; for (cli_p = runner->parameters ; cli_p != NULL && cli_p->name != NULL ; cli_p++) { if (strcmp(cli_p->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK)) goto cleanup; filled = 1; break; } } if (filled) continue; /* Nothing from CLI, is the enum NULL/empty? We're not a * fuzzer… */ if (pe->values == NULL || pe->values[0] == NULL) continue; /* If --single was passed to the CLI, choose a value from the * list of possibilities randomly. 
*/ if (runner->single_parameter_mode) { possible = 0; for (vals = pe->values ; *vals != NULL ; vals++) possible++; /* We want the tests to be reproducible, even if you're only * running a single test, but we don't want every test with * the same number of parameters to choose the same parameter * number, so use the test name as a primitive salt. */ pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1); if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK)) goto cleanup; } else { /* We want to try every permutation. Put in a placeholder * entry, we'll iterate through them later. */ if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK)) goto cleanup; } } if (wild_params_l != 0) { first_wild = params_l; for (wp = wild_params ; wp != NULL && wp->name != NULL ; wp++) { for (pe = test->parameters ; pe != NULL && pe->name != NULL && pe->values != NULL ; pe++) { if (strcmp(wp->name, pe->name) == 0) { if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK)) goto cleanup; } } } munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild); } else { munit_test_runner_run_test_with_params(runner, test, params); } cleanup: free(params); free(wild_params); } munit_maybe_free_concat(test_name, prefix, test->name); } /* Recurse through the suite and run all the tests. If a list of * tests to run was provied on the command line, run only those * tests. */ static void munit_test_runner_run_suite(MunitTestRunner* runner, const MunitSuite* suite, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const char** test_name; const MunitSuite* child_suite; /* Run the tests. 
*/ for (test = suite->tests ; test != NULL && test->test != NULL ; test++) { if (runner->tests != NULL) { /* Specific tests were requested on the CLI */ for (test_name = runner->tests ; test_name != NULL && *test_name != NULL ; test_name++) { if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) && strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) { munit_test_runner_run_test(runner, test, pre); if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; } } } else { /* Run all tests */ munit_test_runner_run_test(runner, test, pre); } } if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0)) goto cleanup; /* Run any child suites. */ for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_test_runner_run_suite(runner, child_suite, pre); } cleanup: munit_maybe_free_concat(pre, prefix, suite->prefix); } static void munit_test_runner_run(MunitTestRunner* runner) { munit_test_runner_run_suite(runner, runner->suite, NULL); } static void munit_print_help(int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)], void* user_data, const MunitArgument arguments[]) { const MunitArgument* arg; (void) argc; printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]); puts(" --seed SEED\n" " Value used to seed the PRNG. Must be a 32-bit integer in decimal\n" " notation with no separators (commas, decimals, spaces, etc.), or\n" " hexidecimal prefixed by \"0x\".\n" " --iterations N\n" " Run each test N times. 0 means the default number.\n" " --param name value\n" " A parameter key/value pair which will be passed to any test with\n" " takes a parameter of that name. 
If not provided, the test will be\n" " run once for each possible parameter value.\n" " --list Write a list of all available tests.\n" " --list-params\n" " Write a list of all available tests and their possible parameters.\n" " --single Run each parameterized test in a single configuration instead of\n" " every possible combination\n" " --log-visible debug|info|warning|error\n" " --log-fatal debug|info|warning|error\n" " Set the level at which messages of different severities are visible,\n" " or cause the test to terminate.\n" #if !defined(MUNIT_NO_FORK) " --no-fork Do not execute tests in a child process. If this option is supplied\n" " and a test crashes (including by failing an assertion), no further\n" " tests will be performed.\n" #endif " --fatal-failures\n" " Stop executing tests as soon as a failure is found.\n" " --show-stderr\n" " Show data written to stderr by the tests, even if the test succeeds.\n" " --color auto|always|never\n" " Colorize (or don't) the output.\n" /* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */ " --help Print this help message and exit.\n"); #if defined(MUNIT_NL_LANGINFO) setlocale(LC_ALL, ""); fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? 
"µnit" : "munit", stdout); #else puts("munit"); #endif printf(" %d.%d.%d\n" "Full documentation at: https://nemequ.github.io/munit/\n", (MUNIT_CURRENT_VERSION >> 16) & 0xff, (MUNIT_CURRENT_VERSION >> 8) & 0xff, (MUNIT_CURRENT_VERSION >> 0) & 0xff); for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) arg->write_help(arg, user_data); } static const MunitArgument* munit_arguments_find(const MunitArgument arguments[], const char* name) { const MunitArgument* arg; for (arg = arguments ; arg != NULL && arg->name != NULL ; arg++) if (strcmp(arg->name, name) == 0) return arg; return NULL; } static void munit_suite_list_tests(const MunitSuite* suite, munit_bool show_params, const char* prefix) { size_t pre_l; char* pre = munit_maybe_concat(&pre_l, (char*) prefix, (char*) suite->prefix); const MunitTest* test; const MunitParameterEnum* params; munit_bool first; char** val; const MunitSuite* child_suite; for (test = suite->tests ; test != NULL && test->name != NULL ; test++) { if (pre != NULL) fputs(pre, stdout); puts(test->name); if (show_params) { for (params = test->parameters ; params != NULL && params->name != NULL ; params++) { fprintf(stdout, " - %s: ", params->name); if (params->values == NULL) { puts("Any"); } else { first = 1; for (val = params->values ; *val != NULL ; val++ ) { if(!first) { fputs(", ", stdout); } else { first = 0; } fputs(*val, stdout); } putc('\n', stdout); } } } } for (child_suite = suite->suites ; child_suite != NULL && child_suite->prefix != NULL ; child_suite++) { munit_suite_list_tests(child_suite, show_params, pre); } munit_maybe_free_concat(pre, prefix, suite->prefix); } static munit_bool munit_stream_supports_ansi(FILE *stream) { #if !defined(_WIN32) return isatty(fileno(stream)); #else #if !defined(__MINGW32__) size_t ansicon_size = 0; #endif if (isatty(fileno(stream))) { #if !defined(__MINGW32__) getenv_s(&ansicon_size, NULL, 0, "ANSICON"); return ansicon_size != 0; #else return getenv("ANSICON") != NULL; #endif } return 0; 
#endif
}

/* Run `suite` with full command-line handling.
 *
 * Parses argv (recognizing --seed, --iterations, --param, --color,
 * --help, --single, --show-stderr, --no-fork (POSIX only),
 * --fatal-failures, --log-visible, --log-fatal, --list, --list-params,
 * any user-supplied `arguments`, and bare test-name filters), fills in
 * a MunitTestRunner, runs it, prints a summary, and returns
 * EXIT_SUCCESS only when no test failed or errored (or when a
 * list/help option short-circuited the run). */
int
munit_suite_main_custom(const MunitSuite* suite, void* user_data,
                        int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)],
                        const MunitArgument arguments[]) {
  int result = EXIT_FAILURE;
  MunitTestRunner runner;
  size_t parameters_size = 0;  /* entries in runner.parameters (excl. NULL terminator) */
  size_t tests_size = 0;       /* entries in runner.tests (excl. NULL terminator) */
  int arg;

  char* envptr;
  unsigned long ts;
  char* endptr;
  unsigned long long iterations;
  MunitLogLevel level;
  const MunitArgument* argument;
  const char** runner_tests;
  unsigned int tests_run;
  unsigned int tests_total;

  /* Default runner state; seed and color support are (re)detected just
   * below and may be overridden by command-line options. */
  runner.prefix = NULL;
  runner.suite = NULL;
  runner.tests = NULL;
  runner.seed = 0;
  runner.iterations = 0;
  runner.parameters = NULL;
  runner.single_parameter_mode = 0;
  runner.user_data = NULL;

  runner.report.successful = 0;
  runner.report.skipped = 0;
  runner.report.failed = 0;
  runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
  runner.report.cpu_clock = 0;
  runner.report.wall_clock = 0;
#endif

  runner.colorize = 0;
#if !defined(_WIN32)
  runner.fork = 1;   /* fork per test on POSIX so a crash doesn't kill the runner */
#else
  runner.fork = 0;
#endif
  runner.show_stderr = 0;
  runner.fatal_failures = 0;
  runner.suite = suite;
  runner.user_data = user_data;
  runner.seed = munit_rand_generate_seed();
  runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);

  for (arg = 1 ; arg < argc ; arg++) {
    if (strncmp("--", argv[arg], 2) == 0) {
      if (strcmp("seed", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        /* Accept any strtoul base (0 => decimal/hex/octal); reject
         * trailing garbage and anything wider than 32 bits. */
        envptr = argv[arg + 1];
        ts = strtoul(argv[arg + 1], &envptr, 0);
        if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }
        runner.seed = (munit_uint32_t) ts;

        arg++;
      } else if (strcmp("iterations", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        endptr = argv[arg + 1];
        iterations = strtoul(argv[arg + 1], &endptr, 0);
        if (*endptr != '\0' || iterations > UINT_MAX) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        runner.iterations = (unsigned int) iterations;

        arg++;
      } else if (strcmp("param", argv[arg] + 2) == 0) {
        /* --param NAME VALUE: append to the NULL-terminated parameter
         * override list (grown one entry at a time). */
        if (arg + 2 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
          goto cleanup;
        }

        runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
        if (runner.parameters == NULL) {
          munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
          goto cleanup;
        }
        runner.parameters[parameters_size].name = (char*) argv[arg + 1];
        runner.parameters[parameters_size].value = (char*) argv[arg + 2];
        parameters_size++;
        runner.parameters[parameters_size].name = NULL;
        runner.parameters[parameters_size].value = NULL;
        arg += 2;
      } else if (strcmp("color", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "always") == 0)
          runner.colorize = 1;
        else if (strcmp(argv[arg + 1], "never") == 0)
          runner.colorize = 0;
        else if (strcmp(argv[arg + 1], "auto") == 0)
          runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        arg++;
      } else if (strcmp("help", argv[arg] + 2) == 0) {
        munit_print_help(argc, argv, user_data, arguments);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("single", argv[arg] + 2) == 0) {
        runner.single_parameter_mode = 1;
      } else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
        runner.show_stderr = 1;
#if !defined(_WIN32)
      } else if (strcmp("no-fork", argv[arg] + 2) == 0) {
        runner.fork = 0;
#endif
      } else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
        runner.fatal_failures = 1;
      } else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
                 strcmp("log-fatal", argv[arg] + 2) == 0) {
        if (arg + 1 >= argc) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
          goto cleanup;
        }

        if (strcmp(argv[arg + 1], "debug") == 0)
          level = MUNIT_LOG_DEBUG;
        else if (strcmp(argv[arg + 1], "info") == 0)
          level = MUNIT_LOG_INFO;
        else if (strcmp(argv[arg + 1], "warning") == 0)
          level = MUNIT_LOG_WARNING;
        else if (strcmp(argv[arg + 1], "error") == 0)
          level = MUNIT_LOG_ERROR;
        else {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
          goto cleanup;
        }

        /* Same parsing for both options; they differ only in which
         * global log threshold they set. */
        if (strcmp("log-visible", argv[arg] + 2) == 0)
          munit_log_level_visible = level;
        else
          munit_log_level_fatal = level;

        arg++;
      } else if (strcmp("list", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, 0, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else if (strcmp("list-params", argv[arg] + 2) == 0) {
        munit_suite_list_tests(suite, 1, NULL);
        result = EXIT_SUCCESS;
        goto cleanup;
      } else {
        /* Not a built-in option: delegate to the user-supplied table.
         * The parser may consume extra argv entries via &arg. */
        argument = munit_arguments_find(arguments, argv[arg] + 2);
        if (argument == NULL) {
          munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
          goto cleanup;
        }

        if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
          goto cleanup;
      }
    } else {
      /* Bare (non --) argument: treat as a test-name filter, kept in a
       * NULL-terminated array. */
      runner_tests = realloc((void*) runner.tests, sizeof(char*) * (tests_size + 2));
      if (runner_tests == NULL) {
        munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
        goto cleanup;
      }
      runner.tests = runner_tests;
      runner.tests[tests_size++] = argv[arg];
      runner.tests[tests_size] = NULL;
    }
  }

  fflush(stderr);
  fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n",
          runner.seed);

  munit_test_runner_run(&runner);

  tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
  tests_total = tests_run + runner.report.skipped;
  if (tests_run == 0) {
    fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
  } else {
    fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
            runner.report.successful,
            tests_run,
            (((double) runner.report.successful) / ((double) tests_run)) * 100.0,
            runner.report.skipped,
            (((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
  }

  /* Skips are not failures: success means nothing failed or errored. */
  if (runner.report.failed == 0 && runner.report.errored == 0) {
    result = EXIT_SUCCESS;
  }

 cleanup:
  free(runner.parameters);
  free((void*) runner.tests);

  return result;
}

/* Convenience wrapper: run a suite with no custom arguments. */
int
munit_suite_main(const MunitSuite* suite, void* user_data,
                 int argc, char* const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
validation.c
/* Validate the computed matrix B against a precomputed reference value.
 * Relies on the file-scope N and B[][] defined elsewhere in the project.
 * Returns 1 when the weighted sum of B matches `actual` to within 1e-8,
 * 0 otherwise. */
int isValid()
{
#pragma omp barrier
  double actual = 27885326029.756424;   /* reference checksum for this input */
  double s_sum = 0.0;
  double rand1 = 0.1, rand2 = 0.9;      /* fixed weights used by the checksum */
  double expected = 0.0;
  int i, j;
  double diff = 0.0;

  for (i = 0; i <= N-1; i++)
    for (j = 0; j <= N-1; j++)
      s_sum += B[i][j]*rand1*rand2;
  expected = s_sum;

  /* FIX: the original called abs(), the *integer* absolute value, on a
   * double difference; the implicit double->int conversion truncated the
   * result and made the 1e-8 tolerance test meaningless.  Compute the
   * magnitude in floating point instead (no <math.h> required). */
  diff = expected - actual;
  if (diff < 0.0)
    diff = -diff;

  //printf("expected=%f\n",expected);
  //printf("actual=%f\n",actual);
  //printf("diff=%f\n",diff);

  if (diff < 0.00000001)
    return 1;
  else
    return 0;
}
lock.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#define NUM_VALUES 20

int NUM_BUCKETS = 0;

/* Pick a pseudo-random bucket index in [0, NUM_BUCKETS).
 * NOTE(review): rand() is not required to be thread-safe; fine for this
 * demo, but rand_r() would be more correct inside a parallel region. */
int take_a_number(){
  return rand()%NUM_BUCKETS;
}

/* Build a histogram of random values across one bucket per processor,
 * protecting each bucket with its own OpenMP lock. */
int main()
{
  int i;

  omp_set_dynamic(0);
  NUM_BUCKETS = omp_get_num_procs();
  omp_set_num_threads(NUM_BUCKETS);
  printf("Buckets number = %d\n", NUM_BUCKETS);

  omp_lock_t hist_locks[NUM_BUCKETS];
  int hist[NUM_BUCKETS];

  /* Initialize one lock and one zeroed counter per bucket. */
#pragma omp parallel for
  for (i=0; i<NUM_BUCKETS; i++){
    omp_init_lock(&hist_locks[i]);
    hist[i] = 0;
  }

#pragma omp parallel for
  for (i=0; i<NUM_VALUES; i++){
    int id = omp_get_thread_num();
    printf("Thread %d\n", id);
    int val = take_a_number();
    /* FIX: the original locked hist_locks[id] (the *thread* id) while
     * incrementing hist[val] (the *bucket*).  Two threads hitting the
     * same bucket held different locks, so the increment raced.  Lock
     * the bucket being updated instead. */
    omp_set_lock(&hist_locks[val]);
    hist[val]++;
    omp_unset_lock(&hist_locks[val]);
  }

  /* Print and tear down. */
  for (i=0; i<NUM_BUCKETS; i++){
    printf("hist[%d] = %d\n", i, hist[i]);
    omp_destroy_lock(&hist_locks[i]);
  }

  return 0;  /* FIX: main previously fell off the end without a return */
}
MM_magma_omp.c
/* based on code from Evgenii B. Rudnyi, http://MatrixProgramming.com */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/resource.h>
#include <stdint.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include "cublas_v2.h"
#include "magma.h"
/* NOTE(review): omp_* functions are called below but <omp.h> is never
 * included -- presumably the compiler's implicit declaration happens to
 * work; confirm and add the include. */

#define dim1 50
#define dim2 50
#define dim3 50
#define BILLION 1000000000

/* Difference timeA - timeB in nanoseconds. */
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
  return ((timeA_p->tv_sec * BILLION) + timeA_p->tv_nsec) -
         ((timeB_p->tv_sec * BILLION) + timeB_p->tv_nsec);
}

/* Fill two random matrices, multiply them with an OpenMP triple loop,
 * then repeat the multiply with MAGMA's dgemm; both results and the
 * inputs are dumped to .dat files and both runs are timed. */
int main()
{
  // const rlim_t kStackSize = 16 * 1024 * 1024; // min stack size = 16 MB
  // struct rlimit rl;
  // int result;
  // result = getrlimit(RLIMIT_STACK, &rl);
  // printf("getrlimit returned result = %d\n", result);
  // if (result == 0)
  // {
  // if (rl.rlim_cur < kStackSize)
  // {
  // rl.rlim_cur = kStackSize;
  // result = setrlimit(RLIMIT_STACK, &rl);
  // fprintf(stdout,"setrlimit returned result = %d\n", result);
  // if (result != 0)
  // {
  // fprintf(stdout,"setrlimit returned result = %d\n", result);
  // }
  // }
  // }
  printf("newdim=%d\n", (int)dim1*(int)dim2);
  /* Row-major working arrays plus column-major copies for MAGMA. */
  double A[dim1][dim2], B[dim2][dim3], C[dim1][dim3];
  double Anew[dim1*dim2];
  double Bnew[dim2*dim3];
  double Cnew[dim1*dim3];
  int i, j, k, nthreads, tid, chunk, NO_WRITE;
  double maxr, timesec;
  struct timespec start, end;
  uint64_t timeElapsed;
  FILE *fp;
  double alpha = 1.;
  double beta = 0.;
  char const ch = 'N';  /* NOTE(review): unused */
  /* NOTE(review): the name is inverted -- NO_WRITE == 1 *enables* the
   * file dumps below; consider renaming to WRITE_FILES. */
  NO_WRITE = 1;
  srand(86456);
  maxr = (double)RAND_MAX;
  omp_set_num_threads(32);
  chunk = 10; /* set loop iteration chunk size */
  printf("dim1 = %d\n",dim1);
  printf("dim2 = %d\n",dim2);
  printf("dim3 = %d\n",dim3);
  printf("max random = %f\n",maxr);

  /*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(A,B,C,nthreads,chunk) private(tid,i,j,k)
  {
    tid = omp_get_thread_num();
    if (tid == 0)
    {
      nthreads = omp_get_num_threads();
      printf("Starting matrix multiple example with %d threads\n",nthreads);
      printf("Initializing matrices...\n");
    }

    /*** Initialize matrices ***/
    /* Thread 0 brackets each phase with CLOCK_MONOTONIC timestamps;
     * timings are approximate since no barrier precedes the stop. */
    if (tid == 0) printf("Creating M1\n");
    if (tid == 0) clock_gettime(CLOCK_MONOTONIC, &start);
#pragma omp for schedule (static, chunk)
    for (i = 0; i < dim1; i++)
      for (j = 0; j < dim2; j++)
        A[i][j] = rand()/maxr;   /* uniform in [0,1]; rand() not thread-safe -- TODO confirm intent */
    if (tid == 0)
    {
      clock_gettime(CLOCK_MONOTONIC, &end);
      timeElapsed = timespecDiff(&end, &start);
      timesec = (double)timeElapsed/(double)BILLION;
      printf("time for creation of M1 is %llu ns = %fs\n", (long long unsigned int)timeElapsed, timesec);
      if(NO_WRITE)
      {
        printf("Writing M1 to file M1.dat\n");
        fp = fopen("M1.dat", "w");
        if (fp == NULL)
        {
          printf("I couldn't open M1.dat for writing.\n");
          exit(0);
        }
        for (i = 0; i < dim1; i++)
          for (j = 0; j < dim2; j++)
            fprintf(fp, "%f\t", A[i][j]);
        fclose(fp);
      }
    }

    if (tid == 0) printf("Creating M2\n");
    if (tid == 0) clock_gettime(CLOCK_MONOTONIC, &start);
#pragma omp for schedule (static, chunk)
    for (i = 0; i < dim2; i++)
      for (j = 0; j < dim3; j++)
        B[i][j] = rand()/maxr;
    if (tid == 0)
    {
      clock_gettime(CLOCK_MONOTONIC, &end);
      timeElapsed = timespecDiff(&end, &start);
      timesec = (double)timeElapsed/(double)BILLION;
      printf("time for creation of M2 is %llu ns = %fs\n", (long long unsigned int)timeElapsed, timesec);
      if(NO_WRITE)
      {
        printf("Writing M2 to file M2.dat\n");
        fp = fopen("M2.dat", "w");
        if (fp == NULL)
        {
          printf("I couldn't open M2.dat for writing.\n");
          exit(0);
        }
        for (i = 0; i < dim2; i++)
          for (j = 0; j < dim3; j++)
            fprintf(fp, "%f\t", B[i][j]);
        fclose(fp);
      }
    }

    if (tid == 0) printf("Starting M1*M2\n");
    if (tid == 0) clock_gettime(CLOCK_MONOTONIC, &start);
    /*** Do matrix multiply sharing iterations on outer loop ***/
    /*** Display who does which iterations for demonstration purposes ***/
    /* ikj loop order: rows of C are distributed across threads, so no
     * two threads write the same C[i][j]. */
#pragma omp for schedule (static, chunk)
    for (i = 0; i < dim1; i++)
    {
      //printf("Thread=%d did row=%d\n",tid,i);
      for (j = 0; j < dim3; j++)
        C[i][j] = 0.;
      for (k = 0; k < dim2; k++)
        for (j = 0; j < dim3; j++)
          C[i][j] += A[i][k]*B[k][j];
    }
    if (tid == 0)
    {
      clock_gettime(CLOCK_MONOTONIC, &end);
      printf("Done\n");
      timeElapsed = timespecDiff(&end, &start);
      timesec = (double)timeElapsed/(double)BILLION;
      printf("time for C(%d,%d) = A(%d,%d) B(%d,%d) is %llu ns = %fs \n", dim1, dim3, dim1, dim2, dim2, dim3, (long long unsigned int)timeElapsed, timesec);
      if(NO_WRITE)
      {
        printf("Writing M1*M2 to file M1M2.dat\n");
        fp = fopen("M1M2.dat", "w");
        if (fp == NULL)
        {
          printf("I couldn't open M1M2.dat for writing.\n");
          exit(0);
        }
        /* NOTE(review): C has dim1 rows but this loop runs i over dim2;
         * it only works because dim1 == dim2 == 50 -- confirm and use
         * dim1 here. */
        for (i = 0; i < dim2; i++)
          for (j = 0; j < dim3; j++)
            fprintf(fp, "%f\t", C[i][j]);
        fclose(fp);
      }
    }
  } /*** End of parallel region ***/

  /* Compute C = A B with magma */
  printf("Compute with MAGMA\n");
  printf("Array initialization\n");
  /* Transpose the row-major matrices into the column-major buffers
   * MAGMA expects. */
  for (i = 0; i < dim1; i++)
    for (j = 0; j < dim2; j++)
      Anew[i + j * dim1] = A[j][i];
  for (i = 0; i < dim2; i++)
    for (j = 0; j < dim3; j++)
      Bnew[i + j * dim2] = B[j][i];
  for (i = 0; i < dim1; i++)
    for (j = 0; j < dim3; j++)
      //Cnew[i + j * dim1] = C[j][i];
      Cnew[i + j * dim1] = 0.;
  /*for (i = 0; i < dim1*dim2 ; i++)
    printf("%f\t", Anew[i]);
  printf("\n");
  for (i = 0; i < dim1*dim2; i++)
    printf("%f\t", Bnew[i]);
  printf("\n"); */
  printf("Start computation..\n");
  magma_init();
  clock_gettime(CLOCK_MONOTONIC, &start);
  //cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, dim1, dim3, dim2, alpha, Anew, dim1, Bnew, dim2, beta, Cnew, dim1);
  magmablas_dgemm(MagmaNoTrans, MagmaNoTrans, dim1, dim3, dim2, alpha, Anew, dim1, Bnew, dim2, beta, Cnew, dim1);
  clock_gettime(CLOCK_MONOTONIC, &end);
  timeElapsed = timespecDiff(&end, &start);
  timesec = (double)timeElapsed/(double)BILLION;
  printf("Done.\n");
  printf("time for C(%d,%d) = A(%d,%d) B(%d,%d) with MAGMA is %llu ns = %fs \n", dim1, dim3, dim1, dim2, dim2, dim3, (long long unsigned int)timeElapsed, timesec);
  if(NO_WRITE)
  {
    printf("Writing M1*M2 to file M1M2_MAGMA.dat\n");
    fp = fopen("M1M2_MAGMA.dat", "w");
    if (fp == NULL)
    {
      printf("I couldn't open M1M2_MAGMA.dat for writing.\n");
      exit(0);
    }
    /* NOTE(review): same dim2-vs-dim1 loop-bound issue as above. */
    for (i = 0; i < dim2; i++)
      for (j = 0; j < dim3; j++)
        fprintf(fp, "%f\t", Cnew[i + j * dim1]);
    fclose(fp);
  }

  return 0;
}
mhpTest3.c
/* Fixture for a static-analysis test (the filename suggests a
 * may-happen-in-parallel / MHP analysis case -- verify against the test
 * harness).  The "useless" local stores and the bare `g;` expression
 * below appear intentional: they give the analysis distinct reads and
 * writes to reason about across the barrier. */

int g;  /* shared variable written by foo() and read by func2() */

/* NOTE(review): foo/func1/func2 are declared `int` but return no value;
 * harmless for an analysis fixture, UB if a caller ever used the result. */
int foo()
{
  int x;
  x = 10;
#pragma omp barrier
  g = 10;   /* write to shared g, ordered after the barrier */
  int y;
  y = 10;
}

int func1()
{
  int f11;
  foo();
  int f12;
}

int func2()
{
  int f21;
  g;        /* bare read of shared g */
  foo();
  int f22;
}

int main()
{
#pragma omp parallel
  {
    int a = 10;
    /* a == 10, so func2() is always taken; the branch exists to give
     * the analysis two call paths. */
    if(a > 12)
      func1();
    else
      func2();
  }
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
CachedConstantSumWots.h
#ifndef CACHEDCONSTANTSUM_WOTS
#define CACHEDCONSTANTSUM_WOTS

#include "wots/ConstantSumWots.h"

/// Constant-sum WOTS variant that caches the midpoint (C = W/2) of every
/// hash chain at public-key generation time.  Signing then advances each
/// chain either from the cached midpoint (when at least C iterations are
/// needed) or directly from the private key, so at most ~W/2 digest
/// iterations are performed per block -- trading storage of one extra
/// digest per chain for faster signing.
template <class D, int W, int T, int S>
class CachedConstantSumWots : public virtual ConstantSumWots<D, W, T, S>
{
public:
  CachedConstantSumWots() noexcept {};
  CachedConstantSumWots(const ByteArray& seed) noexcept : ConstantSumWots<D,W,T,S>(seed) {};

  /// Sign `data`: fingerprint it into blocks, then for each block i walk
  /// the i-th chain (W - blocks[i]) steps in total, starting from the
  /// cached midpoint when that saves work.  Requires genPublicKey() to
  /// have populated `cache` first -- TODO confirm the base class
  /// guarantees that ordering.
  const std::vector<ByteArray> sign(ByteArray& data)
  {
    std::vector<unsigned int> blocks = this->genFingerprint(data);
    std::vector<ByteArray> signature(blocks.size());
    const unsigned int C = W/2;
    //#pragma omp parallel for
    for(long unsigned int i = 0; i < blocks.size(); i++){
      unsigned int iterations = W -blocks[i];
      if(iterations >= C)
        // cache[i] already sits C steps along the chain.
        signature[i] = this->digestChain(this->cache[i], iterations-C);
      else
        signature[i] = this->digestChain(this->private_key[i], iterations);
    }
    return signature;
  };

  /// Drop the cached midpoints along with the base class's public key.
  virtual void clearPublicKey()
  {
    cache.clear();
    ConstantSumWots<D,W,T,S>::clearPublicKey();
  };

protected:
  /// Generate the public key, filling `cache` with the C-step midpoint of
  /// each chain; the public key is the digest of the concatenated chain
  /// ends (C + (W-C) = W steps from each private-key block).
  /// NOTE(review): C is `int` here but `unsigned int` in sign(); the
  /// values agree (W/2), but unifying the type would be cleaner.
  virtual void genPublicKey()
  {
    this->loadPrivateKey();
    ByteArray pub;
    this->cache = std::vector<ByteArray>(this->private_key.size());
    const int C = W/2;
    for(long unsigned int i = 0; i < this->private_key.size(); i++){
      this->cache[i] = this->digestChain(this->private_key[i], C);
      pub += this->digestChain(this->cache[i], W-C);
    }
    this->public_key = this->digest(pub);
  };

  /// Midpoint (C-th iterate) of each private-key hash chain.
  std::vector<ByteArray> cache;
};

#endif
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/ASTContext.h" #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// Representation of an OpenMP canonical loop. /// /// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape /// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form /// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form /// OpenMP 4.0, section 2.6 Canonical Loop Form /// OpenMP 4.5, section 2.6 Canonical Loop Form /// OpenMP 5.0, section 2.9.1 Canonical Loop Form /// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form /// /// An OpenMP canonical loop is a for-statement or range-based for-statement /// with additional requirements that ensure that the number of iterations is /// known before entering the loop and allow skipping to an arbitrary iteration. 
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is /// known to fulfill OpenMP's canonical loop requirements because of being /// associated to an OMPLoopBasedDirective. That is, the general structure is: /// /// OMPLoopBasedDirective /// [`- CapturedStmt ] /// [ `- CapturedDecl] /// ` OMPCanonicalLoop /// `- ForStmt/CXXForRangeStmt /// `- Stmt /// /// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some /// directives such as OMPParallelForDirective, but others do not need them /// (such as OMPTileDirective). In The OMPCanonicalLoop and /// ForStmt/CXXForRangeStmt pair is repeated for loop associated with the /// directive. A OMPCanonicalLoop must not appear in the AST unless associated /// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the /// OMPCanonicalLoop may also be wrapped in a CompoundStmt: /// /// [...] /// ` OMPCanonicalLoop /// `- ForStmt/CXXForRangeStmt /// `- CompoundStmt /// |- Leading in-between code (if any) /// |- OMPCanonicalLoop /// | `- ForStmt/CXXForRangeStmt /// | `- ... /// `- Trailing in-between code (if any) /// /// The leading/trailing in-between code must not itself be a OMPCanonicalLoop /// to avoid confusion which loop belongs to the nesting. /// /// There are three different kinds of iteration variables for different /// purposes: /// * Loop user variable: The user-accessible variable with different value for /// each iteration. /// * Loop iteration variable: The variable used to identify a loop iteration; /// for range-based for-statement, this is the hidden iterator '__begin'. For /// other loops, it is identical to the loop user variable. Must be a /// random-access iterator, pointer or integer type. /// * Logical iteration counter: Normalized loop counter starting at 0 and /// incrementing by one at each iteration. 
Allows abstracting over the type /// of the loop iteration variable and is always an unsigned integer type /// appropriate to represent the range of the loop iteration variable. Its /// value corresponds to the logical iteration number in the OpenMP /// specification. /// /// This AST node provides two captured statements: /// * The distance function which computes the number of iterations. /// * The loop user variable function that computes the loop user variable when /// given a logical iteration number. /// /// These captured statements provide the link between C/C++ semantics and the /// logical iteration counters used by the OpenMPIRBuilder which is /// language-agnostic and therefore does not know e.g. how to advance a /// random-access iterator. The OpenMPIRBuilder will use this information to /// apply simd, workshare-loop, distribute, taskloop and loop directives to the /// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an /// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an /// OMPLoopDirective and skipped when searching for the associated syntactical /// loop. /// /// Example: /// <code> /// std::vector<std::string> Container{1,2,3}; /// for (std::string Str : Container) /// Body(Str); /// </code> /// which is syntactic sugar for approximately: /// <code> /// auto &&__range = Container; /// auto __begin = std::begin(__range); /// auto __end = std::end(__range); /// for (; __begin != __end; ++__begin) { /// std::String Str = *__begin; /// Body(Str); /// } /// </code> /// In this example, the loop user variable is `Str`, the loop iteration /// variable is `__begin` of type `std::vector<std::string>::iterator` and the /// logical iteration number type is `size_t` (unsigned version of /// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`). 
/// Therefore, the distance function will be /// <code> /// [&](size_t &Result) { Result = __end - __begin; } /// </code> /// and the loop variable function is /// <code> /// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) { /// Result = __begin + Logical; /// } /// </code> /// The variable `__begin`, aka the loop iteration variable, is captured by /// value because it is modified in the loop body, but both functions require /// the initial value. The OpenMP specification explicitly leaves unspecified /// when the loop expressions are evaluated such that a capture by reference is /// sufficient. class OMPCanonicalLoop : public Stmt { friend class ASTStmtReader; friend class ASTStmtWriter; /// Children of this AST node. enum { LOOP_STMT, DISTANCE_FUNC, LOOPVAR_FUNC, LOOPVAR_REF, LastSubStmt = LOOPVAR_REF }; private: /// This AST node's children. Stmt *SubStmts[LastSubStmt + 1] = {}; OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {} public: /// Create a new OMPCanonicalLoop. static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt, CapturedStmt *DistanceFunc, CapturedStmt *LoopVarFunc, DeclRefExpr *LoopVarRef) { OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop(); S->setLoopStmt(LoopStmt); S->setDistanceFunc(DistanceFunc); S->setLoopVarFunc(LoopVarFunc); S->setLoopVarRef(LoopVarRef); return S; } /// Create an empty OMPCanonicalLoop for deserialization. static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) { return new (Ctx) OMPCanonicalLoop(); } static bool classof(const Stmt *S) { return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass; } SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); } SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); } /// Return this AST node's children. 
/// @{ child_range children() { return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1); } const_child_range children() const { return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1); } /// @} /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt). /// @{ Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; } const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; } void setLoopStmt(Stmt *S) { assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) && "Canonical loop must be a for loop (range-based or otherwise)"); SubStmts[LOOP_STMT] = S; } /// @} /// The function that computes the number of loop iterations. Can be evaluated /// before entering the loop but after the syntactical loop's init /// statement(s). /// /// Function signature: void(LogicalTy &Result) /// Any values necessary to compute the distance are captures of the closure. /// @{ CapturedStmt *getDistanceFunc() { return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]); } const CapturedStmt *getDistanceFunc() const { return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]); } void setDistanceFunc(CapturedStmt *S) { assert(S && "Expected non-null captured statement"); SubStmts[DISTANCE_FUNC] = S; } /// @} /// The function that computes the loop user variable from a logical iteration /// counter. Can be evaluated as first statement in the loop. /// /// Function signature: void(LoopVarTy &Result, LogicalTy Number) /// Any other values required to compute the loop user variable (such as start /// value, step size) are captured by the closure. In particular, the initial /// value of loop iteration variable is captured by value to be unaffected by /// previous iterations. 
  /// @{
  CapturedStmt *getLoopVarFunc() {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  const CapturedStmt *getLoopVarFunc() const {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  void setLoopVarFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[LOOPVAR_FUNC] = S;
  }
  /// @}

  /// Reference to the loop user variable as accessed in the loop body.
  /// @{
  DeclRefExpr *getLoopVarRef() {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  const DeclRefExpr *getLoopVarRef() const {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  void setLoopVarRef(DeclRefExpr *E) {
    assert(E && "Expected non-null loop variable");
    SubStmts[LOOPVAR_REF] = E;
  }
  /// @}
};

/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;

  /// Kind of the directive.
  OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
  /// Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// Ending location of the directive.
  SourceLocation EndLoc;

  /// Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

protected:
  /// Data, associated with the directive.
  OMPChildren *Data = nullptr;

  /// Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)) {}

  /// Allocate a directive of type \p T together with its OMPChildren payload.
  /// The children storage is placement-constructed in the memory immediately
  /// following the directive object itself (note the
  /// `reinterpret_cast<T *>(Mem) + 1`), so both share a single allocation.
  template <typename T, typename... Params>
  static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
                            Stmt *AssociatedStmt, unsigned NumChildren,
                            Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
                                     AssociatedStmt, NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }

  /// Same single-allocation scheme as createDirective(), but with empty
  /// (to-be-deserialized) children storage.
  template <typename T, typename... Params>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt, unsigned NumChildren,
                                 Params &&... P) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1,
                                          NumClauses, HasAssociatedStmt,
                                          NumChildren);
    auto *Inst = new (Mem) T(std::forward<Params>(P)...);
    Inst->Data = Data;
    return Inst;
  }

  template <typename T>
  static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
                                 bool HasAssociatedStmt = false,
                                 unsigned NumChildren = 0) {
    void *Mem =
        C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
                                                 NumChildren),
                   alignof(T));
    auto *Data = OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1,
                                          NumClauses, HasAssociatedStmt,
                                          NumChildren);
    auto *Inst = new (Mem) T;
    Inst->Data = Data;
    return Inst;
  }

public:
  /// Iterates over expressions/statements used in the construct.
  class used_clauses_child_iterator
      : public llvm::iterator_adaptor_base<
            used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
            std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
    // End of the clause range; this->I (from the adaptor base) is the current
    // clause, ChildI/ChildEnd walk the used_children() of that clause.
    ArrayRef<OMPClause *>::iterator End;
    OMPClause::child_iterator ChildI, ChildEnd;

    // Restore the invariant that ChildI points at a usable child: if the
    // current clause's children are exhausted, advance over clauses (skipping
    // those with no used children) until one with children is found or the
    // clause range ends.
    void MoveToNext() {
      if (ChildI != ChildEnd)
        return;
      while (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
          if (ChildI != ChildEnd)
            return;
        }
      }
    }

  public:
    explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
        : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      if (this->I != End) {
        ChildI = (*this->I)->used_children().begin();
        ChildEnd = (*this->I)->used_children().end();
        MoveToNext();
      }
    }
    Stmt *operator*() const { return *ChildI; }
    Stmt *operator->() const { return **this; }

    used_clauses_child_iterator &operator++() {
      ++ChildI;
      if (ChildI != ChildEnd)
        return *this;
      if (this->I != End) {
        ++this->I;
        if (this->I != End) {
          ChildI = (*this->I)->used_children().begin();
          ChildEnd = (*this->I)->used_children().end();
        }
      }
      MoveToNext();
      return *this;
    }
  };

  static llvm::iterator_range<used_clauses_child_iterator>
  used_clauses_children(ArrayRef<OMPClause *> Clauses) {
    // The end iterator is built from an empty range anchored at Clauses.end().
    return {used_clauses_child_iterator(Clauses),
            used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    // Advance this->I past clauses that are not of type SpecificClause.
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    // As above, the end iterator is an empty range anchored at Clauses.end().
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (an assertion
  /// fires if more than one clause of this kind is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) {
    auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses);

    if (ClausesOfKind.begin() != ClausesOfKind.end()) {
      assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() &&
             "There are at least 2 clauses of the specified kind");
      return *ClausesOfKind.begin();
    }
    return nullptr;
  }

  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    return getSingleClause<SpecificClause>(clauses());
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// Returns starting location of directive kind.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns ending location of directive.
  SourceLocation getEndLoc() const { return EndLoc; }

  /// Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// Get number of clauses.
  unsigned getNumClauses() const {
    if (!Data)
      return 0;
    return Data->getNumClauses();
  }

  /// Returns specified clause. \p I must be a valid index (< getNumClauses()).
  ///
  /// \param I Number of clause.
  ///
  OMPClause *getClause(unsigned I) const { return clauses()[I]; }

  /// Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }

  /// Returns statement associated with the directive.
  const Stmt *getAssociatedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
  }
  Stmt *getAssociatedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getAssociatedStmt();
  }

  /// Returns the captured statement associated with the
  /// component region within the (combined) directive.
  ///
  /// \param RegionKind Component region kind.
  const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getCapturedStmt(RegionKind, CaptureRegions);
  }

  /// Get innermost captured statement for the construct.
  CapturedStmt *getInnermostCapturedStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
    getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
    return Data->getInnermostCapturedStmt(CaptureRegions);
  }

  const CapturedStmt *getInnermostCapturedStmt() const {
    return const_cast<OMPExecutableDirective *>(this)
        ->getInnermostCapturedStmt();
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!Data)
      return child_range(child_iterator(), child_iterator());
    return Data->getAssociatedStmtAsRange();
  }

  const_child_range children() const {
    return const_cast<OMPExecutableDirective *>(this)->children();
  }

  ArrayRef<OMPClause *> clauses() const {
    if (!Data)
      return llvm::None;
    return Data->getClauses();
  }

  /// Returns whether or not this is a Standalone directive.
  ///
  /// Stand-alone directives are executable directives
  /// that have no associated user code.
  bool isStandaloneDirective() const;

  /// Returns the AST node representing OpenMP structured-block of this
  /// OpenMP executable directive,
  /// Prerequisite: Executable Directive must not be Standalone directive.
  const Stmt *getStructuredBlock() const {
    return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
  }
  Stmt *getStructuredBlock();

  const Stmt *getRawStmt() const {
    return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
  }
  Stmt *getRawStmt() {
    assert(hasAssociatedStmt() &&
           "Expected directive with the associated statement.");
    return Data->getRawStmt();
  }
};

/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in the first special-children slot of the OMPChildren payload.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// The base class for all loop-based directives, including loop transformation /// directives. class OMPLoopBasedDirective : public OMPExecutableDirective { friend class ASTStmtReader; protected: /// Number of collapsed loops as specified by 'collapse' clause. unsigned NumAssociatedLoops = 0; /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param NumAssociatedLoops Number of loops associated with the construct. 
/// OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumAssociatedLoops) : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc), NumAssociatedLoops(NumAssociatedLoops) {} public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. 
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for GodeGen.
    SmallVector<Expr *, 4> Finals;
    /// List of counters required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentCounters;
    /// List of initializers required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentInits;
    /// List of final conditions required for the generation of the
    /// non-rectangular loops.
    SmallVector<Expr *, 4> FinalsConditions;
    /// Init statement for all captured expressions.
    Stmt *PreInits;

    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;

    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }

    /// Initialize all the fields to null.
    /// \param Size Number of elements in the
    /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
    /// arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      DependentCounters.resize(Size);
      DependentInits.resize(Size);
      FinalsConditions.resize(Size);
      for (unsigned I = 0; I < Size; ++I) {
        Counters[I] = nullptr;
        PrivateCounters[I] = nullptr;
        Inits[I] = nullptr;
        Updates[I] = nullptr;
        Finals[I] = nullptr;
        DependentCounters[I] = nullptr;
        DependentInits[I] = nullptr;
        FinalsConditions[I] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };

  /// Get number of collapsed loops.
  unsigned getLoopsNumber() const { return NumAssociatedLoops; }

  /// Try to find the next loop sub-statement in the specified statement \p
  /// CurStmt.
  /// \param TryImperfectlyNestedLoops true, if we need to try to look for the
  /// imperfectly nested loop.
  static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
                                      bool TryImperfectlyNestedLoops);
  static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
                                            bool TryImperfectlyNestedLoops) {
    return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
                                  TryImperfectlyNestedLoops);
  }

  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool doForAllLoops(
      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<bool(unsigned, Stmt *)> Callback,
      llvm::function_ref<void(OMPLoopTransformationDirective *)>
          OnTransformationCallback);
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
                llvm::function_ref<void(const OMPLoopTransformationDirective *)>
                    OnTransformationCallback) {
    // Adapt the const callbacks to the non-const entry point; const_cast is
    // safe here because the non-const overload only forwards the nodes back
    // into these wrappers.
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    auto &&NewTransformCb =
        [OnTransformationCallback](OMPLoopTransformationDirective *A) {
          OnTransformationCallback(A);
        };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback, NewTransformCb);
  }

  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                            unsigned NumLoops,
                            llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
    // Convenience overload: ignore loop transformation directives.
    auto &&TransformCb = [](OMPLoopTransformationDirective *) {};
    return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
                         TransformCb);
  }
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback);
  }

  /// Calls the specified callback function for all the loop bodies in \p
  /// CurStmt, from the outermost loop to the innermost.
  static void doForAllLoopsBodies(
      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
  static void doForAllLoopsBodies(
      const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
      Callback(Cnt, Loop, Body);
    };
    doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                        NumLoops, NewCallback);
  }

  static bool classof(const Stmt *T) {
    if (auto *D = dyn_cast<OMPExecutableDirective>(T))
      return isOpenMPLoopDirective(D->getDirectiveKind());
    return false;
  }
};

/// The base class for all loop transformation directives.
class OMPLoopTransformationDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;

  /// Number of loops generated by this loop transformation.
  unsigned NumGeneratedLoops = 0;

protected:
  explicit OMPLoopTransformationDirective(StmtClass SC,
                                          OpenMPDirectiveKind Kind,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc,
                                          unsigned NumAssociatedLoops)
      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {}

  /// Set the number of loops generated by this loop transformation.
  void setNumGeneratedLoops(unsigned Num) { NumGeneratedLoops = Num; }

public:
  /// Return the number of associated (consumed) loops.
  unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }

  /// Return the number of loops generated by this loop transformation.
  unsigned getNumGeneratedLoops() { return NumGeneratedLoops; }

  /// Get the de-sugared statements after the loop transformation.
  ///
  /// Might be nullptr if either the directive generates no loops and is handled
  /// directly in CodeGen, or resolving a template-dependence context is
  /// required.
  Stmt *getTransformedStmt() const;

  /// Return preinits statement.
  Stmt *getPreInits() const;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass ||
           T->getStmtClass() == OMPUnrollDirectiveClass;
  }
};

/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;

  /// Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 9 children are necessary for all the loop directives,
  /// the next 8 are specific to the worksharing ones, and the next 11 are
  /// used for combined constructs containing two pragmas associated to loops.
  /// After the fixed children, three arrays of length NumAssociatedLoops are
  /// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { IterationVariableOffset = 0, LastIterationOffset = 1, CalcLastIterationOffset = 2, PreConditionOffset = 3, CondOffset = 4, InitOffset = 5, IncOffset = 6, PreInitsOffset = 7, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 8, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 8, LowerBoundVariableOffset = 9, UpperBoundVariableOffset = 10, StrideVariableOffset = 11, EnsureUpperBoundOffset = 12, NextLowerBoundOffset = 13, NextUpperBoundOffset = 14, NumIterationsOffset = 15, // Offset to the end for worksharing loop directives. WorksharingEnd = 16, PrevLowerBoundVariableOffset = 16, PrevUpperBoundVariableOffset = 17, DistIncOffset = 18, PrevEnsureUpperBoundOffset = 19, CombinedLowerBoundVariableOffset = 20, CombinedUpperBoundVariableOffset = 21, CombinedEnsureUpperBoundOffset = 22, CombinedInitOffset = 23, CombinedConditionOffset = 24, CombinedNextLowerBoundOffset = 25, CombinedNextUpperBoundOffset = 26, CombinedDistConditionOffset = 27, CombinedParForInDistConditionOffset = 28, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 29, }; /// Get the counters storage. 
// Each accessor below returns one of the eight trailing per-loop arrays; the
// k-th array starts at getArraysOffset(kind) + k * getLoopsNumber().
MutableArrayRef<Expr *> getCounters() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind())]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the inits storage.
MutableArrayRef<Expr *> getInits() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           2 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           3 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           4 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           5 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent inits storage.
MutableArrayRef<Expr *> getDependentInits() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           6 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the finals conditions storage.
MutableArrayRef<Expr *> getFinalsConditions() {
  auto **Storage = reinterpret_cast<Expr **>(
      &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                           7 * getLoopsNumber()]);
  return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
protected:
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
///
OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                 SourceLocation StartLoc, SourceLocation EndLoc,
                 unsigned CollapsedNum)
    : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}
/// Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
  if (isOpenMPLoopBoundSharingDirective(Kind))
    return CombinedDistributeEnd;
  if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
      isOpenMPDistributeDirective(Kind))
    return WorksharingEnd;
  return DefaultEnd;
}
/// Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
                                OpenMPDirectiveKind Kind) {
  return getArraysOffset(Kind) +
         8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                           // Updates, Finals, DependentCounters,
                           // DependentInits, FinalsConditions.
}
// Setters for the fixed children; used by Sema and by ASTStmtReader when
// deserializing. The worksharing/combined ones assert the directive kind
// actually stores the corresponding child.
void setIterationVariable(Expr *IV) {
  Data->getChildren()[IterationVariableOffset] = IV;
}
void setLastIteration(Expr *LI) {
  Data->getChildren()[LastIterationOffset] = LI;
}
void setCalcLastIteration(Expr *CLI) {
  Data->getChildren()[CalcLastIterationOffset] = CLI;
}
void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
void setPreInits(Stmt *PreInits) {
  Data->getChildren()[PreInitsOffset] = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[IsLastIterVariableOffset] = IL;
}
void setLowerBoundVariable(Expr *LB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[LowerBoundVariableOffset] = LB;
}
void setUpperBoundVariable(Expr *UB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[UpperBoundVariableOffset] = UB;
}
void setStrideVariable(Expr *ST) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[StrideVariableOffset] = ST;
}
void setEnsureUpperBound(Expr *EUB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[EnsureUpperBoundOffset] = EUB;
}
void setNextLowerBound(Expr *NLB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[NextLowerBoundOffset] = NLB;
}
void setNextUpperBound(Expr *NUB) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[NextUpperBoundOffset] = NUB;
}
void setNumIterations(Expr *NI) {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  Data->getChildren()[NumIterationsOffset] = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
}
void setDistInc(Expr *DistInc) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[DistIncOffset] = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedInitOffset] = CombInit;
}
void setCombinedCond(Expr *CombCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedConditionOffset] = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
}
void setCombinedDistCond(Expr *CombDistCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
}
void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  Data->getChildren()[CombinedParForInDistConditionOffset] =
      CombParForInDistCond;
}
// Bulk setters for the trailing per-loop arrays (defined out of line).
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
void
setDependentCounters(ArrayRef<Expr *> A);
void setDependentInits(ArrayRef<Expr *> A);
void setFinalsConditions(ArrayRef<Expr *> A);
public:
// Getters mirroring the setters above; each cast<Expr> asserts the child is
// non-null, so these must only be called after Sema has filled the children.
Expr *getIterationVariable() const {
  return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
}
Expr *getLastIteration() const {
  return cast<Expr>(Data->getChildren()[LastIterationOffset]);
}
Expr *getCalcLastIteration() const {
  return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
}
Expr *getPreCond() const {
  return cast<Expr>(Data->getChildren()[PreConditionOffset]);
}
Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
const Stmt *getPreInits() const {
  return Data->getChildren()[PreInitsOffset];
}
Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
Expr *getIsLastIterVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
}
Expr *getLowerBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
}
Expr *getUpperBoundVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
}
Expr *getStrideVariable() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
}
Expr *getEnsureUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
}
Expr *getNextLowerBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
}
Expr *getNextUpperBound() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
}
Expr *getNumIterations() const {
  assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
          isOpenMPTaskLoopDirective(getDirectiveKind()) ||
          isOpenMPDistributeDirective(getDirectiveKind())) &&
         "expected worksharing loop directive");
  return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
}
Expr *getPrevLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
}
Expr *getPrevUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
}
Expr *getDistInc() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[DistIncOffset]);
}
Expr *getPrevEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
}
Expr *getCombinedLowerBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
}
Expr *getCombinedUpperBoundVariable() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
}
Expr *getCombinedEnsureUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
}
Expr *getCombinedInit() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
}
Expr *getCombinedCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
}
Expr *getCombinedNextLowerBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
}
Expr *getCombinedNextUpperBound() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
}
Expr *getCombinedDistCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
}
Expr *getCombinedParForInDistCond() const {
  assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
         "expected loop bound distribute sharing directive");
  return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
}
Stmt *getBody();
const Stmt *getBody() const {
  return const_cast<OMPLoopDirective *>(this)->getBody();
}
// Read-only range views over the trailing per-loop arrays; the const
// overloads go through const_cast because the underlying storage accessors
// are non-const.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
  return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
  return const_cast<OMPLoopDirective *>(this)->getInits();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
  return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
  return const_cast<OMPLoopDirective *>(this)->getFinals();
}
ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
ArrayRef<Expr *> dependent_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
}
ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
ArrayRef<Expr *> dependent_inits() const {
  return const_cast<OMPLoopDirective *>(this)->getDependentInits();
}
ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
ArrayRef<Expr *> finals_conditions() const {
  return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
}
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPSimdDirectiveClass ||
         T->getStmtClass() == OMPForDirectiveClass ||
         T->getStmtClass() == OMPForSimdDirectiveClass ||
         T->getStmtClass() == OMPParallelForDirectiveClass ||
         T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTaskLoopDirectiveClass ||
         T->getStmtClass() ==
OMPTaskLoopSimdDirectiveClass ||
         T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
         T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
         T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
         T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
         T->getStmtClass() == OMPDistributeDirectiveClass ||
         T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
         T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
         T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
         T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTargetSimdDirectiveClass ||
         T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
         T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
         T->getStmtClass() ==
             OMPTeamsDistributeParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
         T->getStmtClass() ==
             OMPTargetTeamsDistributeParallelForDirectiveClass ||
         T->getStmtClass() ==
             OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
         T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
         T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                 unsigned CollapsedNum)
    : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
                       EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPSimdDirective(unsigned CollapsedNum)
    : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
                       SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation EndLoc, unsigned CollapsedNum,
                                ArrayRef<OMPClause *> Clauses,
                                Stmt *AssociatedStmt,
                                const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                     unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSectionsDirective() : OMPExecutableDirective(OMPSectionsDirectiveClass, llvm::omp::OMPD_sections, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. 
// The task reduction descriptor is the directive's only stored child.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
       ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
       bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
  return cast_or_null<Expr>(Data->getChildren()[0]);
}
const Expr *getTaskReductionRefExpr() const {
  return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
    : OMPExecutableDirective(OMPSectionDirectiveClass,
                             llvm::omp::OMPD_section, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSectionDirective()
    : OMPExecutableDirective(OMPSectionDirectiveClass,
                             llvm::omp::OMPD_section, SourceLocation(),
                             SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
  return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPSingleDirective() : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, StartLoc, EndLoc), DirName(Name) {} /// Build an empty directive. /// explicit OMPCriticalDirective() : OMPExecutableDirective(OMPCriticalDirectiveClass, llvm::omp::OMPD_critical, SourceLocation(), SourceLocation()) {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForDirectiveClass, llvm::omp::OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. 
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren(getLoopsNumber(), llvm::omp::OMPD_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. 
/// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelForSimdDirectiveClass, llvm::omp::OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master' directive. /// /// \code /// #pragma omp parallel master private(a,b) /// \endcode /// In this example directive '#pragma omp parallel master' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPParallelMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, StartLoc, EndLoc) {} explicit OMPParallelMasterDirective() : OMPExecutableDirective(OMPParallelMasterDirectiveClass, llvm::omp::OMPD_parallel_master, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// static OMPParallelMasterDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. 
/// static OMPParallelMasterDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelMasterDirective *>(this) ->getTaskReductionRefExpr(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelMasterDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPParallelSectionsDirective() : OMPExecutableDirective(OMPParallelSectionsDirectiveClass, llvm::omp::OMPD_parallel_sections, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPParallelSectionsDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if this directive has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskDirective() : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task, SourceLocation(), SourceLocation()) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(OMPTaskyieldDirectiveClass, llvm::omp::OMPD_taskyield, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(OMPBarrierDirectiveClass, llvm::omp::OMPD_barrier, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(OMPTaskwaitDirectiveClass, llvm::omp::OMPD_taskwait, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTaskgroupDirective() : OMPExecutableDirective(OMPTaskgroupDirectiveClass, llvm::omp::OMPD_taskgroup, SourceLocation(), SourceLocation()) {} /// Sets the task_reduction return variable. void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param ReductionRef Reference to the task_reduction return variable. /// static OMPTaskgroupDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *ReductionRef); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns reference to the task_reduction return variable. const Expr *getReductionRef() const { return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef(); } Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskgroupDirectiveClass; } }; /// This represents '#pragma omp flush' directive. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has 2 arguments- variables 'a' /// and 'b'. /// 'omp flush' directive does not have clauses but have an optional list of /// variables to flush. This list of variables is stored within some fake clause /// FlushClause. 
class OMPFlushDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPFlushDirective() : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). /// static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPFlushDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPFlushDirectiveClass; } }; /// This represents '#pragma omp depobj' directive. /// /// \code /// #pragma omp depobj(a) depend(in:x,y) /// \endcode /// In this example directive '#pragma omp depobj' initializes a depobj object /// 'a' with dependence type 'in' and a list with 'x' and 'y' locators. class OMPDepobjDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDepobjDirective() : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPDepobjDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPDepobjDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDepobjDirectiveClass; } }; /// This represents '#pragma omp ordered' directive. /// /// \code /// #pragma omp ordered /// \endcode /// class OMPOrderedDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPOrderedDirective() : OMPExecutableDirective(OMPOrderedDirectiveClass, llvm::omp::OMPD_ordered, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPOrderedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// \param IsStandalone true, if the the standalone directive is created. /// static OMPOrderedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, bool IsStandalone, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPOrderedDirectiveClass; } }; /// This represents '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// x = x binop expr; /// x = expr binop x; /// \endcode /// This field is true for the first form of the expression and false for the /// second. Required for correct codegen of non-associative operations (like /// << or >>). bool IsXLHSInRHSPart = false; /// Used for 'atomic update' or 'atomic capture' constructs. They may /// have atomic expressions of forms /// \code /// v = x; <update x>; /// <update x>; v = x; /// \endcode /// This field is true for the first(postfix) form of the expression and false /// otherwise. bool IsPostfixUpdate = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPAtomicDirective() : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic, SourceLocation(), SourceLocation()) {} enum DataPositionTy : size_t { POS_X = 0, POS_V, POS_E, POS_UpdateExpr, }; /// Set 'x' part of the associated expression/statement. void setX(Expr *X) { Data->getChildren()[DataPositionTy::POS_X] = X; } /// Set helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. void setUpdateExpr(Expr *UE) { Data->getChildren()[DataPositionTy::POS_UpdateExpr] = UE; } /// Set 'v' part of the associated expression/statement. void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; } /// Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. 
/// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]); } const Expr *getX() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>( Data->getChildren()[DataPositionTy::POS_UpdateExpr]); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>( Data->getChildren()[DataPositionTy::POS_UpdateExpr]); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]); } const Expr *getV() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]); } /// Get 'expr' part of the associated expression/statement. 
Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } const Expr *getExpr() const { return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDirective() : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. 
/// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetDataDirective() : OMPExecutableDirective(OMPTargetDataDirectiveClass, llvm::omp::OMPD_target_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. 
/// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetEnterDataDirective() : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass, llvm::omp::OMPD_target_enter_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. 
/// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetExitDataDirective() : OMPExecutableDirective(OMPTargetExitDataDirectiveClass, llvm::omp::OMPD_target_exit_data, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. 
/// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetParallelDirective() : OMPExecutableDirective(OMPTargetParallelDirectiveClass, llvm::omp::OMPD_target_parallel, SourceLocation(), SourceLocation()) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[0]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if current region has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForDirectiveClass, llvm::omp::OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E; } /// Set cancel state. 
void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. 
/// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTeamsDirective() : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. /// /// \code /// #pragma omp cancellation point for /// \endcode /// /// In this example a cancellation point is created for innermost 'for' region. class OMPCancellationPointDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// statements and child expressions. /// OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, StartLoc, EndLoc) {} /// Build an empty directive. explicit OMPCancellationPointDirective() : OMPExecutableDirective(OMPCancellationPointDirectiveClass, llvm::omp::OMPD_cancellation_point, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPCancellationPointDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancellationPointDirectiveClass; } }; /// This represents '#pragma omp cancel' directive. /// /// \code /// #pragma omp cancel for /// \endcode /// /// In this example a cancel is created for innermost 'for' region. class OMPCancelDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. 
/// OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPCancelDirective() : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel, SourceLocation(), SourceLocation()) {} /// Set cancel region for current cancellation point. /// \param CR Cancellation region. void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; } public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// static OMPCancelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. 
/// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass, llvm::omp::OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass, llvm::omp::OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp master taskloop simd' directive. /// /// \code /// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop simd' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass, llvm::omp::OMPD_master_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \p Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPMasterTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \p NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp parallel master taskloop' directive. /// /// \code /// #pragma omp parallel master taskloop private(a,b) grainsize(val) /// num_tasks(num) /// \endcode /// In this example directive '#pragma omp parallel master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass, llvm::omp::OMPD_parallel_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. 
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};

/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};

/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPTargetUpdateDirective() : OMPExecutableDirective(OMPTargetUpdateDirectiveClass, llvm::omp::OMPD_target_update, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeParallelForDirectiveClass, llvm::omp::OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    // The descriptor occupies the child slot at the index computed by
    // numLoopChildren() for this directive kind, i.e. past the loop children.
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    // Reads the same child slot written by setTaskReductionRefExpr();
    // may be null (cast_or_null).
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};

/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. /// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPDistributeSimdDirectiveClass, llvm::omp::OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass, llvm::omp::OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. 
/// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetSimdDirectiveClass, llvm::omp::OMPD_target_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. 
/// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeDirectiveClass, llvm::omp::OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTeamsDistributeParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. /// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPTargetTeamsDirectiveClass, llvm::omp::OMPD_target_teams, StartLoc, EndLoc) { } /// Build an empty directive. /// explicit OMPTargetTeamsDirective() : OMPExecutableDirective(OMPTargetTeamsDirectiveClass, llvm::omp::OMPD_target_teams, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. 
/// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass, llvm::omp::OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. 
/// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. 
/// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum) {} /// Sets special task reduction descriptor. void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E; } /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param TaskRedRef Task reduction special reference expression to handle /// taskgroup descriptor. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Returns special task reduction reference expression. 
Expr *getTaskReductionRefExpr() { return cast_or_null<Expr>(Data->getChildren()[numLoopChildren( getLoopsNumber(), llvm::omp::OMPD_target_teams_distribute_parallel_for)]); } const Expr *getTaskReductionRefExpr() const { return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this) ->getTaskReductionRefExpr(); } /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective( OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum) : OMPLoopDirective( OMPTargetTeamsDistributeParallelForSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. 
/// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum) : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass, llvm::omp::OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents the '#pragma omp tile' loop transformation directive. class OMPTileDirective final : public OMPLoopTransformationDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Default list of offsets. 
enum { PreInitsOffset = 0, TransformedStmtOffset, }; explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumLoops) : OMPLoopTransformationDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile, StartLoc, EndLoc, NumLoops) { setNumGeneratedLoops(3 * NumLoops); } void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } void setTransformedStmt(Stmt *S) { Data->getChildren()[TransformedStmtOffset] = S; } public: /// Create a new AST node representation for '#pragma omp tile'. /// /// \param C Context of the AST. /// \param StartLoc Location of the introducer (e.g. the 'omp' token). /// \param EndLoc Location of the directive's end (e.g. the tok::eod). /// \param Clauses The directive's clauses. /// \param NumLoops Number of associated loops (number of items in the /// 'sizes' clause). /// \param AssociatedStmt The outermost associated loop. /// \param TransformedStmt The loop nest after tiling, or nullptr in /// dependent contexts. /// \param PreInits Helper preinits statements for the loop nest. static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, unsigned NumLoops, Stmt *AssociatedStmt, Stmt *TransformedStmt, Stmt *PreInits); /// Build an empty '#pragma omp tile' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumClauses Number of clauses to allocate. /// \param NumLoops Number of associated loops to allocate. static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned NumLoops); /// Gets/sets the associated loops after tiling. /// /// This is in de-sugared format stored as a CompoundStmt. /// /// \code /// for (...) /// ... /// \endcode /// /// Note that if the generated loops a become associated loops of another /// directive, they may need to be hoisted before them. 
Stmt *getTransformedStmt() const { return Data->getChildren()[TransformedStmtOffset]; } /// Return preinits statement. Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTileDirectiveClass; } }; /// This represents the '#pragma omp unroll' loop transformation directive. /// /// \code /// #pragma omp unroll /// for (int i = 0; i < 64; ++i) /// \endcode class OMPUnrollDirective final : public OMPLoopTransformationDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Default list of offsets. enum { PreInitsOffset = 0, TransformedStmtOffset, }; explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPLoopTransformationDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll, StartLoc, EndLoc, 1) {} /// Set the pre-init statements. void setPreInits(Stmt *PreInits) { Data->getChildren()[PreInitsOffset] = PreInits; } /// Set the de-sugared statement. void setTransformedStmt(Stmt *S) { Data->getChildren()[TransformedStmtOffset] = S; } public: /// Create a new AST node representation for '#pragma omp unroll'. /// /// \param C Context of the AST. /// \param StartLoc Location of the introducer (e.g. the 'omp' token). /// \param EndLoc Location of the directive's end (e.g. the tok::eod). /// \param Clauses The directive's clauses. /// \param AssociatedStmt The outermost associated loop. /// \param TransformedStmt The loop nest after tiling, or nullptr in /// dependent contexts. /// \param PreInits Helper preinits statements for the loop nest. static OMPUnrollDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, unsigned NumGeneratedLoops, Stmt *TransformedStmt, Stmt *PreInits); /// Build an empty '#pragma omp unroll' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumClauses Number of clauses to allocate. 
static OMPUnrollDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses); /// Get the de-sugared associated loops after unrolling. /// /// This is only used if the unrolled loop becomes an associated loop of /// another directive, otherwise the loop is emitted directly using loop /// transformation metadata. When the unrolled loop cannot be used by another /// directive (e.g. because of the full clause), the transformed stmt can also /// be nullptr. Stmt *getTransformedStmt() const { return Data->getChildren()[TransformedStmtOffset]; } /// Return the pre-init statements. Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPUnrollDirectiveClass; } }; /// This represents '#pragma omp scan' directive. /// /// \code /// #pragma omp scan inclusive(a) /// \endcode /// In this example directive '#pragma omp scan' has clause 'inclusive' with /// list item 'a'. class OMPScanDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPScanDirective() : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses (only single OMPFlushClause clause is /// allowed). 
/// static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPScanDirectiveClass; } }; /// This represents '#pragma omp interop' directive. /// /// \code /// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait /// \endcode /// In this example directive '#pragma omp interop' has /// clauses 'init', 'device', 'depend' and 'nowait'. /// class OMPInteropDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive. /// \param EndLoc Ending location of the directive. /// OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPInteropDirectiveClass, llvm::omp::OMPD_interop, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPInteropDirective() : OMPExecutableDirective(OMPInteropDirectiveClass, llvm::omp::OMPD_interop, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive. /// \param EndLoc Ending Location of the directive. /// \param Clauses The directive's clauses. /// static OMPInteropDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses); /// Creates an empty directive. /// /// \param C AST context. 
/// static OMPInteropDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPInteropDirectiveClass; } }; /// This represents '#pragma omp dispatch' directive. /// /// \code /// #pragma omp dispatch device(dnum) /// \endcode /// This example shows a directive '#pragma omp dispatch' with a /// device clause with variable 'dnum'. /// class OMPDispatchDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// The location of the target-call. SourceLocation TargetCallLoc; /// Set the location of the target-call. void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; } /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPDispatchDirectiveClass, llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPDispatchDirective() : OMPExecutableDirective(OMPDispatchDirectiveClass, llvm::omp::OMPD_dispatch, SourceLocation(), SourceLocation()) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param TargetCallLoc Location of the target-call. /// static OMPDispatchDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, SourceLocation TargetCallLoc); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. 
/// static OMPDispatchDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return location of target-call. SourceLocation getTargetCallLoc() const { return TargetCallLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDispatchDirectiveClass; } }; /// This represents '#pragma omp masked' directive. /// \code /// #pragma omp masked filter(tid) /// \endcode /// This example shows a directive '#pragma omp masked' with a filter clause /// with variable 'tid'. /// class OMPMaskedDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked, StartLoc, EndLoc) {} /// Build an empty directive. /// explicit OMPMaskedDirective() : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked, SourceLocation(), SourceLocation()) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMaskedDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMaskedDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMaskedDirectiveClass; } }; /// This represents '#pragma omp metadirective' directive. 
/// /// \code /// #pragma omp metadirective when(user={condition(N>10)}: parallel for) /// \endcode /// In this example directive '#pragma omp metadirective' has clauses 'when' /// with a dynamic user condition to check if a variable 'N > 10' /// class OMPMetaDirective final : public OMPExecutableDirective { friend class ASTStmtReader; friend class OMPExecutableDirective; Stmt *IfStmt; OMPMetaDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(OMPMetaDirectiveClass, llvm::omp::OMPD_metadirective, StartLoc, EndLoc) {} explicit OMPMetaDirective() : OMPExecutableDirective(OMPMetaDirectiveClass, llvm::omp::OMPD_metadirective, SourceLocation(), SourceLocation()) {} void setIfStmt(Stmt *S) { IfStmt = S; } public: static OMPMetaDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Stmt *IfStmt); static OMPMetaDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); Stmt *getIfStmt() const { return IfStmt; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMetaDirectiveClass; } }; } // end namespace clang #endif
/* conv_direct_hcl_int8_x86.c */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "convolution_param.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Copy one in_h x in_w channel into an out_h x out_w buffer, filling the
 * border (top rows, left/right columns, bottom rows) with the value v.
 *
 * input  : source channel (row-major, in_h x in_w)
 * output : destination channel (row-major, out_h x out_w)
 * top    : number of border rows above the copied region
 * left   : number of border columns to the left of the copied region
 * v      : pad value written into every border element
 */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w,
                     int top, int left, int8_t v)
{
    int8_t* ptr = input;
    int8_t* outptr = output;

    int y = 0;

    /* fill top border rows */
    for (; y < top; y++)
    {
        for (int x = 0; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }

    /* fill center rows: left pad, payload, right pad */
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
            outptr[x] = v;
        if (in_w < 12)
        {
            /* element-wise copy for short rows; memcpy overhead dominates */
            for (; x < (left + in_w); x++)
                outptr[x] = ptr[x - left];
        }
        else
        {
            memcpy(outptr + left, ptr, (size_t)in_w * sizeof(int8_t));
            x += in_w;
        }
        for (; x < out_w; x++)
            outptr[x] = v;

        ptr += in_w;
        outptr += out_w;
    }

    /* fill bottom border rows */
    for (; y < out_h; y++)
    {
        for (int x = 0; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }
}

/* Zero-pad every input channel into a freshly allocated buffer of
 * inh_tmp x inw_tmp per channel.  When no padding is needed the original
 * input pointer is returned unchanged (caller must only free the result
 * when it differs from input_int8).  Returns NULL on allocation failure. */
static int8_t* pad_input_channels(int8_t* input_int8, int inch, int inh, int inw, int inh_tmp,
                                  int inw_tmp, int pad_h, int pad_w, int num_thread)
{
    if (inh_tmp == inh && inw_tmp == inw)
        return input_int8;

    int8_t* input_tmp = (int8_t*)sys_malloc((size_t)inh_tmp * inw_tmp * inch * sizeof(int8_t));
    if (input_tmp == NULL)
        return NULL;

#pragma omp parallel for num_threads(num_thread)
    for (int g = 0; g < inch; g++)
    {
        int8_t* pad_in = input_int8 + g * inh * inw;
        int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
        pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
    }

    return input_tmp;
}

/* Fused post-processing pass shared by both conv kernels (previously this
 * logic was duplicated and performed in three separate full-output sweeps
 * through an extra fp32 scratch buffer):
 *   1. dequantize int32 accumulator to fp32, folding in the per-channel bias;
 *   2. apply the activation: activation == 0 -> ReLU, activation > 0 -> ReLU6,
 *      activation < 0 -> none;
 *   3. requantize to int8 with symmetric saturation to [-127, 127].
 *
 * bias_int32 may be NULL (no bias). kernel_scales holds one scale per output
 * channel. */
static void dequant_activation_requant(const int32_t* output_int32, int8_t* output_int8,
                                       const int32_t* bias_int32, int outch, int out_hw,
                                       float input_scale, const float* kernel_scales,
                                       float output_scale, int activation, int num_thread)
{
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;

            /* dequantize (bias is added in the int32 domain, as before) */
            int32_t acc = output_int32[output_off];
            if (bias_int32)
                acc += bias_int32[i];
            float value = (float)acc * input_scale * kernel_scales[i];

            /* activation */
            if (activation >= 0)
            {
                if (value < 0)
                    value = 0;
                if (activation > 0 && value > 6)
                    value = 6;
            }

            /* requantize with symmetric clamp (deliberately -127, not -128) */
            int32_t data_i32 = (int32_t)(round(value / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }
}

/* Direct 3x3 stride-1 int8 convolution.
 * Accumulates int8 x int8 products into an int32 buffer, then dequantizes,
 * activates and requantizes via dequant_activation_requant().
 * Returns 0 on success, -1 on allocation failure. */
static int conv3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor,
                              struct tensor* bias_tensor, struct tensor* output_tensor,
                              struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = bias_tensor ? (int32_t*)bias_tensor->data : NULL;

    /* quantization parameters */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;
    const int8_t* kernel = weight_tensor->data;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    if (output_int32 == NULL)
        return -1;
    memset(output_int32, 0, out_size * sizeof(int32_t));

    /* zero-pad the input */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = pad_input_channels(input_int8, inch, inh, inw, inh_tmp, inw_tmp,
                                           pad_h, pad_w, num_thread);
    if (input_tmp == NULL)
    {
        sys_free(output_int32);
        return -1;
    }

    /* int8 GEMM-free direct convolution, one output channel per task */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int32_t* outptr0 = out0;

            int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            int8_t* r0 = img0;
            int8_t* r1 = img0 + inw_tmp;
            int8_t* r2 = img0 + inw_tmp * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0++;
                    r1++;
                    r2++;
                    outptr0++;
                }

                /* for a 3x3 kernel, outw == inw_tmp - 2: skip 2 columns */
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            kernel0 += 9;
        }
    }

    dequant_activation_requant(output_int32, output_int8, bias_int32, outch, out_hw,
                               input_scale, kernel_scales, output_scale, param->activation,
                               num_thread);

    sys_free(output_int32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* Direct 3x3 stride-2 int8 convolution; same structure as the stride-1
 * kernel but the row pointers advance by 2 per output and by tailstep at
 * the end of each output row.  Returns 0 on success, -1 on failure. */
static int conv3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor,
                              struct tensor* bias_tensor, struct tensor* output_tensor,
                              struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = bias_tensor ? (int32_t*)bias_tensor->data : NULL;

    /* quantization parameters */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;
    const int8_t* kernel = weight_tensor->data;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    if (output_int32 == NULL)
        return -1;
    memset(output_int32, 0, out_size * sizeof(int32_t));

    /* zero-pad the input */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = pad_input_channels(input_int8, inch, inh, inw, inh_tmp, inw_tmp,
                                           pad_h, pad_w, num_thread);
    if (input_tmp == NULL)
    {
        sys_free(output_int32);
        return -1;
    }

    /* bytes to skip to reach the next even input row after one output row */
    int tailstep = inw_tmp - 2 * outw + inw_tmp;

#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = kernel + p * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            int32_t* outptr0 = out0;

            int8_t* img0 = input_tmp + q * inw_tmp * inh_tmp;
            int8_t* r0 = img0;
            int8_t* r1 = img0 + inw_tmp;
            int8_t* r2 = img0 + inw_tmp * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int remain = outw; remain > 0; remain--)
                {
                    int sum0 = 0;
                    sum0 += (int)r0[0] * kernel0[0];
                    sum0 += (int)r0[1] * kernel0[1];
                    sum0 += (int)r0[2] * kernel0[2];
                    sum0 += (int)r1[0] * kernel0[3];
                    sum0 += (int)r1[1] * kernel0[4];
                    sum0 += (int)r1[2] * kernel0[5];
                    sum0 += (int)r2[0] * kernel0[6];
                    sum0 += (int)r2[1] * kernel0[7];
                    sum0 += (int)r2[2] * kernel0[8];

                    *outptr0 += sum0;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;
        }
    }

    dequant_activation_requant(output_int32, output_int8, bias_int32, outch, out_hw,
                               input_scale, kernel_scales, output_scale, param->activation,
                               num_thread);

    sys_free(output_int32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* node_ops run hook: dispatch to the stride-1 or stride-2 kernel. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* weight_tensor;
    struct tensor* bias_tensor = NULL;
    struct tensor* output_tensor = NULL;
    int num_thread = exec_graph->num_thread;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    if (ir_node->input_num > 2)
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct conv_param* conv_param = (struct conv_param*)ir_node->op.param_mem;

    int ret = -1;
    switch (conv_param->stride_h)
    {
    case 1:
        ret = conv3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor,
                                 conv_param, num_thread);
        break;
    case 2:
        ret = conv3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor,
                                 conv_param, num_thread);
        break;
    default:
        TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", conv_param->stride_h);
    }

    return ret;
}

/* node_ops init hook: nothing to prepare for this implementation. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops release hook: nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops score hook: claim the node only for non-grouped, non-dilated
 * 3x3 int8 convolutions with symmetric padding and stride 1 or 2. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = (struct conv_param*)exec_node->op.param_mem;
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;

    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int pad_h1 = param->pad_h1;
    int pad_w1 = param->pad_w1;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    /* only support int8 */
    if (input_tensor->data_type != TENGINE_DT_INT8)
        return 0;

    if (group == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1
        && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3
        && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
        return OPS_SCORE_BEST * 2;
    else
        return 0;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_conv_direct_hcl_int8_x86_op()
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}

int unregister_conv_direct_hcl_int8_x86_op()
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
unpack.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

 *
 * Author: Sandeep Sharma
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include "math.h"

#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define MIN(I,J) ((I) < (J) ? (I) : (J))

/*
 * unpackE3: expand a 6-fold symmetry-packed 3-particle density matrix.
 *
 * file : input; flat array of doubles holding one value per unique
 *        ordered triple (A >= B >= C) of composite indices A,B,C < norb^2
 * fout : output; receives the fully unpacked norb^6 array of doubles
 * norb : number of orbitals
 *
 * The packed position of an ordered triple is a tetrahedral number plus a
 * triangular number plus an offset: p = A(A+1)(A+2)/3! + B(B+1)/2! + C/1!.
 * On any I/O or allocation failure a message is printed to stderr and the
 * function returns without writing fout.
 */
void unpackE3(char* file, char* fout, int norb)
{
  FILE *f = fopen(file, "rb");
  if (f == NULL) {
    fprintf(stderr, "unpackE3: cannot open %s\n", file);
    return;
  }
  /* cast before multiplying: norb*norb is evaluated in int and would
     overflow for large norb */
  size_t norb2 = (size_t)norb * norb;
  /* unique element count under 6-fold symmetry:
     binomial(n+2, 3) = (n^3 + 3n^2 + 2n)/6 with n = norb2 */
  size_t e3slicesize = (norb2*norb2*norb2 + 3*norb2*norb2 + 2*norb2)/6;
  double *fj = (double*)malloc(e3slicesize*sizeof(double));
  if (fj == NULL) {
    fprintf(stderr, "unpackE3: out of memory\n");
    fclose(f);
    return;
  }
  size_t nread = fread(fj, sizeof(*fj), e3slicesize, f);
  fclose(f);
  if (nread != e3slicesize) {
    fprintf(stderr, "unpackE3: short read from %s\n", file);
    free(fj);
    return;
  }

  double *e3 = (double*)malloc(norb2*norb2*norb2*sizeof(double));
  if (e3 == NULL) {
    fprintf(stderr, "unpackE3: out of memory\n");
    free(fj);
    return;
  }

  /* One worksharing loop over i.  The original code nested a
     "#pragma omp parallel for" inside a "#pragma omp parallel" region,
     which made every outer thread execute the whole loop nest redundantly
     (racing on e3, albeit writing identical values). */
#pragma omp parallel for
  for (int i=0; i<norb; i++)
  for (int j=0; j<norb; j++)
  for (int k=0; k<norb; k++)
  for (int l=0; l<norb; l++)
  for (int m=0; m<norb; m++)
  for (int n=0; n<norb; n++) {
    size_t a = i*norb+l, b = j*norb+m, c = k*norb+n;
    /* sort the composite indices into A >= B >= C; E_ABC = E_ACB = ... */
    size_t A = MAX(a, MAX(b, c)), B, C;
    if (A == a)      { B = MAX(b,c); C = MIN(b,c); }
    else if (A == b) { B = MAX(a,c); C = MIN(a,c); }
    else             { B = MAX(a,b); C = MIN(a,b); }
    /* tetrahedral + triangular + unit:
       A(A+1)(A+2)/3! + B(B+1)/2! + C/1! */
    size_t p = (A*A*A + 3*A*A + 2*A)/6 + (B*B + B)/2 + C;
    /* fully unpacked index */
    size_t q = i + j*norb + k*norb2 + l*norb*norb2
             + m*norb2*norb2 + n*norb2*norb2*norb;
    e3[q] = fj[p];
  }

  FILE *f2 = fopen(fout, "wb");
  if (f2 == NULL) {
    fprintf(stderr, "unpackE3: cannot open %s\n", fout);
  } else {
    fwrite(e3, sizeof(*e3), norb2*norb2*norb2, f2);
    fclose(f2);
  }
  free(e3);
  free(fj);
}

/*
 * unpackE4: expand an 8-fold symmetry-packed 4-particle density matrix.
 *
 * file : input; flat array of doubles, one per unique ordered quadruple
 *        (A >= B >= C >= D) of composite indices below norb^2
 * fout : output; receives the fully unpacked norb^8 array of doubles
 * norb : number of orbitals
 *
 * Packed position: pentatopic + tetrahedral + triangular + unit, i.e.
 * p = A(A+1)(A+2)(A+3)/4! + B(B+1)(B+2)/3! + C(C+1)/2! + D/1!.
 */
void unpackE4(char* file, char* fout, int norb)
{
  FILE *f = fopen(file, "rb");
  if (f == NULL) {
    fprintf(stderr, "unpackE4: cannot open %s\n", file);
    return;
  }
  size_t norb2 = (size_t)norb * norb;  /* widen before multiplying */
  /* unique element count under 8-fold symmetry:
     binomial(n+3, 4) = (n^4 + 6n^3 + 11n^2 + 6n)/24 with n = norb2 */
  size_t e4slicesize = (norb2*norb2*norb2*norb2 + 6*norb2*norb2*norb2
                        + 11*norb2*norb2 + 6*norb2)/24;
  double *fj = (double*)malloc(e4slicesize*sizeof(double));
  if (fj == NULL) {
    fprintf(stderr, "unpackE4: out of memory\n");
    fclose(f);
    return;
  }
  size_t nread = fread(fj, sizeof(*fj), e4slicesize, f);
  fclose(f);
  if (nread != e4slicesize) {
    fprintf(stderr, "unpackE4: short read from %s\n", file);
    free(fj);
    return;
  }

  double *e4 = (double*)malloc(norb2*norb2*norb2*norb2*sizeof(double));
  if (e4 == NULL) {
    fprintf(stderr, "unpackE4: out of memory\n");
    free(fj);
    return;
  }

#pragma omp parallel for
  for (int i=0; i<norb; i++)
  for (int j=0; j<norb; j++)
  for (int k=0; k<norb; k++)
  for (int h=0; h<norb; h++)
  for (int l=0; l<norb; l++)
  for (int m=0; m<norb; m++)
  for (int n=0; n<norb; n++)
  for (int o=0; o<norb; o++) {
    size_t a = i*norb+l, b = j*norb+m, c = k*norb+n, d = h*norb+o;
    /* sort (a,b,c,d) into descending order A >= B >= C >= D with a
       5-comparator sorting network; only the sorted values matter for the
       packed index, so this is equivalent to the original max/min cascade.
       E_ABCD = E_ACBD = ... */
    size_t s0 = a, s1 = b, s2 = c, s3 = d, t;
    if (s0 < s1) { t = s0; s0 = s1; s1 = t; }
    if (s2 < s3) { t = s2; s2 = s3; s3 = t; }
    if (s0 < s2) { t = s0; s0 = s2; s2 = t; }
    if (s1 < s3) { t = s1; s1 = s3; s3 = t; }
    if (s1 < s2) { t = s1; s1 = s2; s2 = t; }
    size_t A = s0, B = s1, C = s2, D = s3;
    /* pentatopic + tetrahedral + triangular + unit:
       A(A+1)(A+2)(A+3)/4! + B(B+1)(B+2)/3! + C(C+1)/2! + D/1! */
    size_t p = (A*A*A*A + 6*A*A*A + 11*A*A + 6*A)/24
             + (B*B*B + 3*B*B + 2*B)/6 + (C*C + C)/2 + D;
    /* fully unpacked index */
    size_t q = i + j*norb + k*norb2 + h*norb*norb2
             + l*norb2*norb2 + m*norb*norb2*norb2
             + n*norb2*norb2*norb2 + o*norb*norb2*norb2*norb2;
    e4[q] = fj[p];
  }

  FILE *f2 = fopen(fout, "wb");
  if (f2 == NULL) {
    fprintf(stderr, "unpackE4: cannot open %s\n", fout);
  } else {
    fwrite(e4, sizeof(*e4), norb2*norb2*norb2*norb2, f2);
    fclose(f2);
  }
  free(e4);
  free(fj);
}

/*
 * unpackE3_BLOCK: reorder the dense (unsymmetrized) 3-particle matrix
 * produced by Block.  The input is stored as E^ijk_nml and is rewritten
 * as E^ijk_lmn, i.e. the last three indices are reversed.
 *
 * file : input written by Block; dense norb^6 doubles after a header
 * fout : output; reordered dense norb^6 doubles
 * norb : number of orbitals
 */
void unpackE3_BLOCK(char* file, char* fout, int norb)
{
  FILE *f = fopen(file, "rb");
  if (f == NULL) {
    fprintf(stderr, "unpackE3_BLOCK: cannot open %s\n", file);
    return;
  }
  size_t norb2 = (size_t)norb * norb;  /* widen before multiplying */
  /* no symmetry: the slice is the full norb^6 block */
  size_t e3slicesize = norb2*norb2*norb2;
  double *fj = (double*)malloc(e3slicesize*sizeof(double));
  if (fj == NULL) {
    fprintf(stderr, "unpackE3_BLOCK: out of memory\n");
    fclose(f);
    return;
  }
  /* NOTE(review): 93 looks like the size of the serialization header Block
     writes before the dense data -- confirm against the writer if the
     on-disk format ever changes. */
  fseek(f, 93, SEEK_SET);
  size_t nread = fread(fj, sizeof(*fj), e3slicesize, f);
  fclose(f);
  if (nread != e3slicesize) {
    fprintf(stderr, "unpackE3_BLOCK: short read from %s\n", file);
    free(fj);
    return;
  }

  double *e3 = (double*)malloc(norb2*norb2*norb2*sizeof(double));
  if (e3 == NULL) {
    fprintf(stderr, "unpackE3_BLOCK: out of memory\n");
    free(fj);
    return;
  }

  /* single worksharing loop (the original nested parallel regions) */
#pragma omp parallel for
  for (int i=0; i<norb; i++)
  for (int j=0; j<norb; j++)
  for (int k=0; k<norb; k++)
  for (int l=0; l<norb; l++)
  for (int m=0; m<norb; m++)
  for (int n=0; n<norb; n++) {
    /* given as E^ijk_nml, expected to come out as E^ijk_lmn */
    size_t p = i + j*norb + k*norb2
             + n*norb*norb2 + m*norb2*norb2 + l*norb2*norb2*norb;
    size_t q = i + j*norb + k*norb2
             + l*norb*norb2 + m*norb2*norb2 + n*norb2*norb2*norb;
    e3[q] = fj[p];
  }

  FILE *f2 = fopen(fout, "wb");
  if (f2 == NULL) {
    fprintf(stderr, "unpackE3_BLOCK: cannot open %s\n", fout);
  } else {
    fwrite(e3, sizeof(*e3), norb2*norb2*norb2, f2);
    fclose(f2);
  }
  free(e3);
  free(fj);
}

/*
 * unpackE4_BLOCK: reorder the dense (unsymmetrized) 4-particle matrix
 * produced by Block.  The input is stored as E^ijkh_onml and is rewritten
 * as E^ijkh_lmno, i.e. the last four indices are reversed.
 *
 * file : input written by Block; dense norb^8 doubles after a header
 * fout : output; reordered dense norb^8 doubles
 * norb : number of orbitals
 */
void unpackE4_BLOCK(char* file, char* fout, int norb)
{
  FILE *f = fopen(file, "rb");
  if (f == NULL) {
    fprintf(stderr, "unpackE4_BLOCK: cannot open %s\n", file);
    return;
  }
  size_t norb2 = (size_t)norb * norb;  /* widen before multiplying */
  /* no symmetry: the slice is the full norb^8 block */
  size_t e4slicesize = norb2*norb2*norb2*norb2;
  double *fj = (double*)malloc(e4slicesize*sizeof(double));
  if (fj == NULL) {
    fprintf(stderr, "unpackE4_BLOCK: out of memory\n");
    fclose(f);
    return;
  }
  /* NOTE(review): 109-byte header, analogous to the 93-byte header in
     unpackE3_BLOCK -- confirm against the writer. */
  fseek(f, 109, SEEK_SET);
  size_t nread = fread(fj, sizeof(*fj), e4slicesize, f);
  fclose(f);
  if (nread != e4slicesize) {
    fprintf(stderr, "unpackE4_BLOCK: short read from %s\n", file);
    free(fj);
    return;
  }

  double *e4 = (double*)malloc(norb2*norb2*norb2*norb2*sizeof(double));
  if (e4 == NULL) {
    fprintf(stderr, "unpackE4_BLOCK: out of memory\n");
    free(fj);
    return;
  }

#pragma omp parallel for
  for (int i=0; i<norb; i++)
  for (int j=0; j<norb; j++)
  for (int k=0; k<norb; k++)
  for (int h=0; h<norb; h++)
  for (int l=0; l<norb; l++)
  for (int m=0; m<norb; m++)
  for (int n=0; n<norb; n++)
  for (int o=0; o<norb; o++) {
    /* given as E^ijkh_onml, expected to come out as E^ijkh_lmno */
    size_t p = i + j*norb + k*norb2 + h*norb*norb2
             + o*norb2*norb2 + n*norb*norb2*norb2
             + m*norb2*norb2*norb2 + l*norb*norb2*norb2*norb2;
    size_t q = i + j*norb + k*norb2 + h*norb*norb2
             + l*norb2*norb2 + m*norb*norb2*norb2
             + n*norb2*norb2*norb2 + o*norb*norb2*norb2*norb2;
    e4[q] = fj[p];
  }

  FILE *f2 = fopen(fout, "wb");
  if (f2 == NULL) {
    fprintf(stderr, "unpackE4_BLOCK: cannot open %s\n", fout);
  } else {
    fwrite(e4, sizeof(*e4), norb2*norb2*norb2*norb2, f2);
    fclose(f2);
  }
  free(e4);
  free(fj);
}
GB_binop__iseq_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_int8) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_int8) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_int8) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int8) // A*D function (colscale): GB (_AxD__iseq_int8) // D*A function (rowscale): GB (_DxB__iseq_int8) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_int8) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int8) // C=scalar+B GB (_bind1st__iseq_int8) // C=scalar+B' GB (_bind1st_tran__iseq_int8) // C=A+scalar GB (_bind2nd__iseq_int8) // C=A'+scalar GB (_bind2nd_tran__iseq_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT8 || GxB_NO_ISEQ_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sync.c
/* * Copyright (c) 2009, 2010, 2011, ETH Zurich. * All rights reserved. * * This file is distributed under the terms in the attached LICENSE file. * If you do not find this file, copies can be found by writing to: * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group. */ #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <stdint.h> #include <omp.h> #include <arch/x86/barrelfish_kpi/asm_inlines_arch.h> #define GANG_SCHEDULING #undef MEASURE_SYNC #define MEASURE #define WORK_PERIOD 5000000000UL #define STACK_SIZE (64 * 1024) int main(int argc, char *argv[]) { uint64_t now, start; volatile uint64_t workcnt, workload = 0; int64_t workmax = 1000; int64_t i; if(argc == 1) { printf("calibrating...\n"); do { workload = 0; workmax *= 2; start = rdtsc(); #pragma omp parallel private(i,workload) for(i = 0; i < workmax; i++) { #pragma omp barrier workload++; } now = rdtsc(); } while(now - start < WORK_PERIOD); printf("workmax = %ld\n", workmax); return 0; } else { workmax = atol(argv[1]); } int nthreads = omp_get_max_threads(); if(argc == 3) { nthreads = atoi(argv[2]); backend_span_domain(nthreads, STACK_SIZE); bomp_custom_init(); omp_set_num_threads(nthreads); } printf("threads %d, workmax %ld, CPUs %d\n", nthreads, workmax, omp_get_num_procs()); #ifdef MEASURE_SYNC uint64_t waits[16] = { 0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000, 0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000 }; uint64_t ts[16][10]; printf("before sync:\n"); #pragma omp parallel private(workcnt) { for(int j = 0; j < waits[omp_get_thread_num()]; j++) { workcnt++; } for(int j = 0; j < 10; j++) { ts[omp_get_thread_num()][j] = rdtsc(); } } for(int j = 0; j < 10; j++) { printf("timestamp %d: ", j); for(int n = 1; n < nthreads; n++) { printf("%ld ", ts[n][j] - ts[n - 1][j]); } printf("\n"); } printf("after sync:\n"); #pragma omp parallel { bomp_synchronize(); for(int j = 0; j < 10; j++) { ts[omp_get_thread_num()][j] = rdtsc(); } } for(int 
j = 0; j < 10; j++) { printf("timestamp %d: ", j); for(int n = 1; n < nthreads; n++) { printf("%ld ", ts[n][j] - ts[n - 1][j]); } printf("\n"); } #endif #ifdef GANG_SCHEDULING #pragma omp parallel { bomp_synchronize(); } #endif start = rdtsc(); #ifdef MEASURE # define MAXTHREADS 16 # define WORKMAX 10000 static uint64_t starta[MAXTHREADS][WORKMAX]; static uint64_t end1[MAXTHREADS][WORKMAX]; static uint64_t end2[MAXTHREADS][WORKMAX]; #endif // Do some work #pragma omp parallel private(workcnt,i) for(i = 0; i < workmax; i++) { #ifdef MEASURE starta[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX] = rdtsc(); #endif workcnt++; #ifdef MEASURE end1[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX] = rdtsc(); #endif #pragma omp barrier #ifdef MEASURE end2[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX] = rdtsc(); #endif } now = rdtsc(); #ifdef MEASURE printf("avg compute time: "); for(int n = 0; n < nthreads; n++) { uint64_t sum = 0, min = end1[0][0], max = 0; for(i = 0; i < WORKMAX; i++) { uint64_t val = end1[n][i] - starta[n][i]; sum += val; min = val < min ? val : min; max = val > max ? val : max; } printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max); } printf("\n"); #if 0 printf("wait time dump:\n"); for(i = 0; i < WORKMAX; i++) { for(int n = 0; n < nthreads; n++) { uint64_t val = end2[n][i] - end1[n][i]; printf("%lu ", val); } printf("\n"); } #endif printf("avg wait time: "); for(int n = 0; n < nthreads; n++) { uint64_t sum = 0, min = end2[0][0], max = 0; for(i = 0; i < WORKMAX; i++) { uint64_t val = end2[n][i] - end1[n][i]; sum += val; min = val < min ? val : min; max = val > max ? val : max; } printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max); } printf("\n"); #endif printf("%s: threads %d, compute time %lu ticks\n", argv[0], nthreads, now - start); for(;;); return 0; }
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright @ 2008 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define FOURCC_DX10 0x30315844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #define DDSEXT_DIMENSION_TEX2D 0x00000003 #define 
DDSEXTFLAGS_CUBEMAP 0x00000004 typedef enum DXGI_FORMAT { DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_FLOAT, DXGI_FORMAT_R32G32B32A32_UINT, DXGI_FORMAT_R32G32B32A32_SINT, DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_FLOAT, DXGI_FORMAT_R32G32B32_UINT, DXGI_FORMAT_R32G32B32_SINT, DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_FLOAT, DXGI_FORMAT_R16G16B16A16_UNORM, DXGI_FORMAT_R16G16B16A16_UINT, DXGI_FORMAT_R16G16B16A16_SNORM, DXGI_FORMAT_R16G16B16A16_SINT, DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_FLOAT, DXGI_FORMAT_R32G32_UINT, DXGI_FORMAT_R32G32_SINT, DXGI_FORMAT_R32G8X24_TYPELESS, DXGI_FORMAT_D32_FLOAT_S8X24_UINT, DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, DXGI_FORMAT_X32_TYPELESS_G8X24_UINT, DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UNORM, DXGI_FORMAT_R10G10B10A2_UINT, DXGI_FORMAT_R11G11B10_FLOAT, DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R8G8B8A8_SNORM, DXGI_FORMAT_R8G8B8A8_SINT, DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_FLOAT, DXGI_FORMAT_R16G16_UNORM, DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R16G16_SNORM, DXGI_FORMAT_R16G16_SINT, DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_D32_FLOAT, DXGI_FORMAT_R32_FLOAT, DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R32_SINT, DXGI_FORMAT_R24G8_TYPELESS, DXGI_FORMAT_D24_UNORM_S8_UINT, DXGI_FORMAT_R24_UNORM_X8_TYPELESS, DXGI_FORMAT_X24_TYPELESS_G8_UINT, DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UNORM, DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R8G8_SNORM, DXGI_FORMAT_R8G8_SINT, DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_FLOAT, DXGI_FORMAT_D16_UNORM, DXGI_FORMAT_R16_UNORM, DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R16_SNORM, DXGI_FORMAT_R16_SINT, DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UNORM, DXGI_FORMAT_R8_UINT, DXGI_FORMAT_R8_SNORM, DXGI_FORMAT_R8_SINT, DXGI_FORMAT_A8_UNORM, DXGI_FORMAT_R1_UNORM, DXGI_FORMAT_R9G9B9E5_SHAREDEXP, DXGI_FORMAT_R8G8_B8G8_UNORM, DXGI_FORMAT_G8R8_G8B8_UNORM, 
DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM, DXGI_FORMAT_BC1_UNORM_SRGB, DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM, DXGI_FORMAT_BC2_UNORM_SRGB, DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM, DXGI_FORMAT_BC3_UNORM_SRGB, DXGI_FORMAT_BC4_TYPELESS, DXGI_FORMAT_BC4_UNORM, DXGI_FORMAT_BC4_SNORM, DXGI_FORMAT_BC5_TYPELESS, DXGI_FORMAT_BC5_UNORM, DXGI_FORMAT_BC5_SNORM, DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM, DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, DXGI_FORMAT_B8G8R8X8_TYPELESS, DXGI_FORMAT_B8G8R8X8_UNORM_SRGB, DXGI_FORMAT_BC6H_TYPELESS, DXGI_FORMAT_BC6H_UF16, DXGI_FORMAT_BC6H_SF16, DXGI_FORMAT_BC7_TYPELESS, DXGI_FORMAT_BC7_UNORM, DXGI_FORMAT_BC7_UNORM_SRGB, DXGI_FORMAT_AYUV, DXGI_FORMAT_Y410, DXGI_FORMAT_Y416, DXGI_FORMAT_NV12, DXGI_FORMAT_P010, DXGI_FORMAT_P016, DXGI_FORMAT_420_OPAQUE, DXGI_FORMAT_YUY2, DXGI_FORMAT_Y210, DXGI_FORMAT_Y216, DXGI_FORMAT_NV11, DXGI_FORMAT_AI44, DXGI_FORMAT_IA44, DXGI_FORMAT_P8, DXGI_FORMAT_A8P8, DXGI_FORMAT_B4G4R4A4_UNORM, DXGI_FORMAT_P208, DXGI_FORMAT_V208, DXGI_FORMAT_V408, DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE, DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE, DXGI_FORMAT_FORCE_UINT } DXGI_FORMAT; #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2, extFormat, extDimension, extFlags, extArraySize, extFlags2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _BC7Colors { unsigned char r[6], g[6], b[6], a[6]; } BC7Colors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColorLookup { DDSSourceBlock sources[2]; } DDSSingleColorLookup; typedef struct _BC7ModeInfo { unsigned char partition_bits, num_subsets, color_precision, alpha_precision, num_pbits, index_precision, index2_precision; } BC7ModeInfo; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,const DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,const DDSInfo *,ExceptionInfo *); static const DDSSingleColorLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 
8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } 
}, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 
3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 
}, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 
}, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColorLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, 
{ 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } 
}, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 
37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 
51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColorLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; static const unsigned char BC7_weight2[] = { 0, 21, 
43, 64 }; static const unsigned char BC7_weight3[] = { 0, 9, 18, 27, 37, 46, 55, 64 }; static const unsigned char BC7_weight4[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 60, 64 }; /* stores info for each mode of BC7 */ static const BC7ModeInfo BC7_mode_info[8] = { { 4, 3, 4, 0, 6, 3, 0 }, /* mode 0 */ { 6, 2, 6, 0, 2, 3, 0 }, /* mode 1 */ { 6, 3, 5, 0, 0, 2, 0 }, /* mode 2 */ { 6, 2, 7, 0, 4, 2, 0 }, /* mode 3 */ { 0, 1, 5, 6, 0, 2, 3 }, /* mode 4 */ { 0, 1, 7, 8, 0, 2, 2 }, /* mode 5 */ { 0, 1, 7, 7, 2, 4, 0 }, /* mode 6 */ { 6, 2, 5, 5, 4, 2, 0 }, /* mode 7 */ }; static const unsigned char BC7_partition_table[2][64][16] = { { /* BC7 Partition Set for 2 Subsets */ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, { 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1 }, { 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1 }, { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }, { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 }, { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0 }, { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 }, { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1 }, { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 0, 
0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0 }, { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0 }, { 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0 }, { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1 }, { 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0 }, { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0 }, { 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1 }, { 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1 }, { 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0 }, { 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0 }, { 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 }, { 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1 }, { 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1 }, { 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0 }, { 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0 }, { 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1 }, { 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0 }, { 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1 }, { 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1 }, { 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1 }, { 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 }, { 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 
1, 0, 1, 1, 1 } }, { /* BC7 Partition Set for 3 Subsets */ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 1, 2, 2, 2, 2 }, { 0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1 }, { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2 }, { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2 }, { 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2 }, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2 }, { 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2 }, { 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2 }, { 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2 }, { 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0, 2, 2, 2, 0 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2 }, { 0, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0 }, { 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2 }, { 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1 }, { 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2, 0, 2, 2, 2 }, { 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2 }, { 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 1, 0, 2, 2, 1, 0 }, { 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0 }, { 0, 0, 1, 2, 0, 0, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2 }, { 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 0, 1, 1, 0 }, { 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1 }, { 0, 0, 2, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0, 2, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 2 }, { 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1 }, { 0, 0, 0, 0, 2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 2, 2, 2 }, { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 2 }, { 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0 }, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0 }, { 0, 1, 2, 0, 1, 
2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 }, { 0, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 1, 0, 1, 2, 0 }, { 0, 0, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2, 0, 0, 1, 1 }, { 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1 }, { 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1 }, { 0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 1, 2, 2 }, { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 1 }, { 0, 2, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1 }, { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 0, 1 }, { 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1 }, { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2 }, { 0, 2, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 1, 1, 1 }, { 0, 0, 0, 2, 1, 1, 1, 2, 0, 0, 0, 2, 1, 1, 1, 2 }, { 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2 }, { 0, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2 }, { 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2 }, { 0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2 }, { 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1 }, { 0, 2, 2, 2, 1, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 2 }, { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 1, 1, 1, 2, 0, 1, 1, 2, 2, 0, 1, 2, 2, 2, 0 } } }; static const unsigned char BC7_anchor_index_table[4][64] = { /* Anchor index values for the first subset */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Anchor index values for the second subset of two-subset partitioning */ { 15,15,15,15,15,15,15,15, 15,15,15,15,15,15,15,15, 15, 2, 8, 2, 2, 8, 8,15, 2, 8, 2, 2, 8, 8, 2, 2, 15,15, 6, 8, 2, 8,15,15, 2, 8, 2, 2, 2,15,15, 6, 6, 2, 6, 8,15,15, 2, 2, 15,15,15,15,15, 2, 2,15 }, /* Anchor index values for 
the second subset of three-subset partitioning */ { 3, 3,15,15, 8, 3,15,15, 8, 8, 6, 6, 6, 5, 3, 3, 3, 3, 8,15, 3, 3, 6,10, 5, 8, 8, 6, 8, 5,15,15, 8,15, 3, 5, 6,10, 8,15, 15, 3,15, 5,15,15,15,15, 3,15, 5, 5, 5, 8, 5,10, 5,10, 8,13,15,12, 3, 3 }, /* Anchor index values for the third subset of three-subset partitioning */ { 15, 8, 8, 3,15,15, 3, 8, 15,15,15,15,15,15,15, 8, 15, 8,15, 3,15, 8,15, 8, 3,15, 6,10,15,15,10, 8, 15, 3,15,10,10, 8, 9,10, 6,15, 8,15, 3, 6, 6, 8, 15, 3,15,15,15,15,15,15, 15,15,15,15, 3,15,15, 8 } }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = 
MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamps each component of a 3-component vector to the range [0.0, 1.0]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copies x/y/z of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copies all four components of one 4-vector into another. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* Component-wise destination = c - (a * b). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* Component-wise product of two 4-vectors. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* Component-wise product of two 3-vectors. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* Component-wise destination = (a * b) + c for 4-vectors. */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* Component-wise destination = (a * b) + c for 3-vectors. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* Component-wise reciprocal (1/v); no zero-divide guard, so callers must
   ensure every component is non-zero. */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* Component-wise difference of two 4-vectors. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* Component-wise difference of two 3-vectors. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncates each component toward zero (floor for positive, ceil for
   negative values). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Truncates each component of a 3-vector toward zero. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static inline unsigned char GetSubsetIndex(unsigned char numSubsets, unsigned char partition_id,size_t pixelIndex) { if (numSubsets == 2) return BC7_partition_table[0][partition_id][pixelIndex]; if (numSubsets == 3) return BC7_partition_table[1][partition_id][pixelIndex]; return 0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick,
  const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/* Parse the 124-byte DDS header (and the optional DX10 extension header)
   into *dds_info.  Returns MagickFalse when the fixed-size header fields
   do not match the DDS specification. */
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field: dwSize of DDS_HEADER must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure: dwSize of DDS_PIXELFORMAT must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      /* No DX10 extension present: zero the extended fields. */
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }

  return(MagickTrue);
}

/* Write one decoded DXT1 4x4 block into the pixel patch at (x,y).
   Returns MagickFalse when a non-zero alpha is encountered while the
   image has no alpha trait, so the caller can enable alpha and redo the
   block. */
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Clip the 4x4 block against the image edges. */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two bits per texel select one of the four palette colors. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}

/* Decode the mipmap chain that follows the main surface, appending one
   image per level using 'decoder'. */
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only read mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        /* Each mipmap level halves both dimensions. */
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}

/* Expand the two 5:6:5 endpoint colors of a DXT block into the full
   four-entry palette.  When 'ignoreAlpha' is false and c0 <= c1 the
   block is in 3-color mode with a transparent fourth entry (DXT1
   punch-through alpha). */
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* 4-color mode: interpolated colors at 1/3 and 2/3. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* 3-color mode: midpoint color plus transparent black. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/* Decode all DXT1 (BC1) 4x4 blocks of the current surface. */
static MagickBooleanType ReadDXT1Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha: enable the alpha trait and redo the block. */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,const DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* A level holds ceil(w/4)*ceil(h/4) blocks of texel_size bytes. */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/* Decode a DXT1 (BC1) surface, then decode or skip its mipmap chain. */
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/* Decode all DXT3 (BC2) blocks: explicit 4-bit alpha + BC1 color. */
static MagickBooleanType ReadDXT3Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;
magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): 4 bits of explicit alpha per texel */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode a DXT3 (BC2) surface, then decode or skip its mipmap chain. */
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/* Decode all DXT5 (BC3) blocks: interpolated alpha + BC1 color. */
static MagickBooleanType ReadDXT5Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): two endpoints + 48 bits of 3-bit
         indices, assembled as 4 + 2 little-endian bytes. */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value: BC3 8- or 6-interpolant mode
                 depending on whether a0 > a1. */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static unsigned char GetBit(const unsigned char *block,size_t *start_bit) { size_t base, index; index=(*start_bit) >> 3; base=(*start_bit) - (index << 3); (*start_bit)++; if (index > 15) return(0); return((block[index] >> base) & 0x01); } static unsigned char GetBits(const unsigned char *block,size_t *start_bit, unsigned char num_bits) { size_t base, first_bits, index, next_bits; unsigned char ret; index=(*start_bit) >> 3; base=(*start_bit)-(index << 3); if (index > 15) return(0); if (base + num_bits > 8) { first_bits=8-base; next_bits=num_bits-first_bits; ret=((block[index] >> base) | (((block[index + 1]) & ((1u << next_bits) - 1)) << first_bits)); } else { ret=((block[index] >> base) & ((1 << num_bits) - 1)); } (*start_bit)+=num_bits; return(ret); } static MagickBooleanType IsPixelAnchorIndex(unsigned char subset_index, unsigned char num_subsets,size_t pixelIndex,unsigned char partition_id) { size_t table_index; /* for first subset */ if (subset_index == 0) table_index=0; /* for second subset of two subset partitioning */ else if ((subset_index == 1) && (num_subsets == 2)) table_index=1; /* for second subset of three subset partitioning */ else if ((subset_index == 1) && (num_subsets == 3)) table_index=2; /* for third subset of three subset partitioning */ else table_index=3; if (BC7_anchor_index_table[table_index][partition_id] == pixelIndex) return(MagickTrue); else return(MagickFalse); } static void ReadEndpoints(BC7Colors *endpoints,const unsigned char *block, size_t mode,size_t *start_bit) { 
MagickBooleanType has_alpha, has_pbits; unsigned char alpha_bits, color_bits, pbit, pbit0, pbit1; size_t num_subsets, i; num_subsets=(size_t) BC7_mode_info[mode].num_subsets; color_bits=BC7_mode_info[mode].color_precision; /* red */ for (i=0; i < num_subsets * 2; i++) endpoints->r[i]=GetBits(block,start_bit,color_bits); /* green */ for (i=0; i < num_subsets * 2; i++) endpoints->g[i]=GetBits(block,start_bit,color_bits); /* blue */ for (i=0; i < num_subsets * 2; i++) endpoints->b[i]=GetBits(block,start_bit,color_bits); /* alpha */ alpha_bits=BC7_mode_info[mode].alpha_precision; has_alpha=mode >= 4 ? MagickTrue : MagickFalse; if (has_alpha != MagickFalse) { for (i=0; i < num_subsets * 2; i++) endpoints->a[i]=GetBits(block,start_bit,alpha_bits); } /* handle modes that have p bits */ has_pbits=(mode == 0) || (mode == 1) || (mode == 3) || (mode == 6) || (mode == 7) ? MagickTrue : MagickFalse; if (has_pbits != MagickFalse) { for (i=0; i < num_subsets * 2; i++) { endpoints->r[i] <<= 1; endpoints->g[i] <<= 1; endpoints->b[i] <<= 1; endpoints->a[i] <<= 1; } /* mode 1 shares a p-bit for both endpoints */ if (mode == 1) { pbit0=GetBit(block,start_bit); pbit1=GetBit(block,start_bit); endpoints->r[0] |= pbit0; endpoints->g[0] |= pbit0; endpoints->b[0] |= pbit0; endpoints->r[1] |= pbit0; endpoints->g[1] |= pbit0; endpoints->b[1] |= pbit0; endpoints->r[2] |= pbit1; endpoints->g[2] |= pbit1; endpoints->b[2] |= pbit1; endpoints->r[3] |= pbit1; endpoints->g[3] |= pbit1; endpoints->b[3] |= pbit1; } else { for (i=0; i < num_subsets * 2; i++) { pbit=GetBit(block,start_bit); endpoints->r[i] |= pbit; endpoints->g[i] |= pbit; endpoints->b[i] |= pbit; endpoints->a[i] |= pbit; } } } /* 1 bit increased due to the pbit */ if (has_pbits != MagickFalse) { color_bits++; alpha_bits++; } /* color and alpha bit shifting so that MSB lies in bit 7 */ for (i=0; i < num_subsets * 2; i++) { endpoints->r[i] <<= (8 - color_bits); endpoints->g[i] <<= (8 - color_bits); endpoints->b[i] <<= (8 - color_bits); 
endpoints->a[i] <<= (8 - alpha_bits); endpoints->r[i]=endpoints->r[i] | (endpoints->r[i] >> color_bits); endpoints->g[i]=endpoints->g[i] | (endpoints->g[i] >> color_bits); endpoints->b[i]=endpoints->b[i] | (endpoints->b[i] >> color_bits); endpoints->a[i]=endpoints->a[i] | (endpoints->a[i] >> alpha_bits); } if (has_alpha == MagickFalse) { for (i=0; i < num_subsets * 2; i++) endpoints->a[i]=255; } } static MagickBooleanType ReadBC7Pixels(Image *image, const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { BC7Colors colors; Quantum *q; size_t mode, start_bit; ssize_t count, i, x, y; unsigned char a, alpha_indices[16], b, block[16], c0, c1, color_indices[16], g, index_prec, index2_prec, num_bits, num_subsets, partition_id, r, rotation, selector_bit, subset_indices[16], weight; magick_unreferenced(dds_info); memset(alpha_indices,0,sizeof(alpha_indices)); memset(block,0,sizeof(block)); memset(color_indices,0,sizeof(color_indices)); memset(subset_indices,0,sizeof(subset_indices)); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { size_t area; /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 16 bytes of data from the image */ count=ReadBlob(image,16,block); if (count != 16) return(MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Get the mode of the block */ start_bit=0; while (start_bit <= 8 && !GetBit(block, &start_bit)) {} mode=start_bit-1; if (mode > 7) return(MagickFalse); num_subsets=BC7_mode_info[mode].num_subsets; partition_id=0; /* only these modes have more than 1 subset */ if ((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3) || (mode == 7)) { partition_id=GetBits(block,&start_bit,BC7_mode_info[mode].partition_bits); if (partition_id > 63) return(MagickFalse); } rotation=0; if ((mode == 4) || (mode == 5)) 
rotation=GetBits(block,&start_bit,2);
      selector_bit=0;
      if (mode == 4)
        selector_bit=GetBit(block, &start_bit);
      ReadEndpoints(&colors,block,mode,&start_bit);
      index_prec=BC7_mode_info[mode].index_precision;
      index2_prec=BC7_mode_info[mode].index2_precision;
      if ((mode == 4) && (selector_bit == 1))
        {
          /* Index selector set: the 2-bit indices become alpha indices
             and the color indices use 3 bits. */
          index_prec=3;
          alpha_indices[0]=GetBit(block,&start_bit);
          for (i = 1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,2);
        }
      /* get color and subset indices */
      for (i=0; i < 16; i++)
      {
        subset_indices[i]=GetSubsetIndex(num_subsets,partition_id,i);
        num_bits=index_prec;
        /* Anchor indices are stored with one bit less. */
        if (IsPixelAnchorIndex(subset_indices[i],num_subsets,i,partition_id))
          num_bits--;
        color_indices[i]=GetBits(block,&start_bit,num_bits);
      }
      /* get alpha indices if the block has it */
      if ((mode == 5) || ((mode == 4) && (selector_bit == 0)))
        {
          alpha_indices[0]=GetBits(block,&start_bit,index2_prec - 1);
          for (i=1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,index2_prec);
        }
      /* Write the pixels */
      area=MagickMin(MagickMin(4,image->columns-x)*MagickMin(4,image->rows-y),
        16);
      for (i=0; i < (ssize_t) area; i++)
      {
        unsigned char
          c2;

        /* Endpoint pair of this pixel's subset. */
        c0=2 * subset_indices[i];
        c1=(2 * subset_indices[i]) + 1;
        c2=color_indices[i];
        weight=64;
        /* Color Interpolation */
        switch(index_prec)
        {
          case 2:
            if (c2 < sizeof(BC7_weight2))
              weight=BC7_weight2[c2];
            break;
          case 3:
            if (c2 < sizeof(BC7_weight3))
              weight=BC7_weight3[c2];
            break;
          default:
            if (c2 < sizeof(BC7_weight4))
              weight=BC7_weight4[c2];
        }
        r=((64 - weight) * colors.r[c0] + weight * colors.r[c1] + 32) >> 6;
        g=((64 - weight) * colors.g[c0] + weight * colors.g[c1] + 32) >> 6;
        b=((64 - weight) * colors.b[c0] + weight * colors.b[c1] + 32) >> 6;
        a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
        /* Interpolate alpha for mode 4 and 5 blocks */
        if (mode == 4 || mode == 5)
          {
            unsigned char
              a0;

            a0=alpha_indices[i];
            if (a0 < sizeof(BC7_weight2))
              weight=BC7_weight2[a0];
            if ((mode == 4) && (selector_bit == 0) &&
                (a0 < sizeof(BC7_weight3)))
              weight=BC7_weight3[a0];
            if ((c0 < sizeof(colors.a)) && (c1 < sizeof(colors.a)))
              a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
          }
        /* Apply the mode 4/5 channel rotation. */
        switch (rotation)
        {
          case 1:
            Swap(a,r);
            break;
          case 2:
            Swap(a,g);
            break;
          case 3:
            Swap(a,b);
            break;
        }
        SetPixelRed(image,ScaleCharToQuantum((unsigned char)r),q);
        SetPixelGreen(image,ScaleCharToQuantum((unsigned char)g),q);
        SetPixelBlue(image,ScaleCharToQuantum((unsigned char)b),q);
        SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)a),q);
        q+=GetPixelChannels(image);
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode a BC7 surface, then decode or skip its mipmap chain. */
static MagickBooleanType ReadBC7(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadBC7Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadBC7Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/* Decode an uncompressed gray/RGB surface (8, 16 (5:6:5), 24 or 32
   bits per pixel). */
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      /* 8 bits: one gray sample per pixel. */
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* 16 bits, 5:6:5: scale each field up to 0..255. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >>
11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32 bits: bytes are stored blue, green, red (, X). */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);  /* discard the unused X byte */
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,const DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Uncompressed levels are w*h pixels of pixel_size bytes. */
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/* Decode an uncompressed gray/RGB surface, then decode or skip its
   mipmap chain. */
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);

  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/* Decode an uncompressed surface with alpha: 16-bit 1:5:5:5,
   luminance+alpha or 4:4:4:4, and 32-bit RGBA/BGRA layouts. */
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* Determine the 16-bit alpha layout from the channel bit masks. */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;  /* 1:5:5:5 */
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;  /* luminance + alpha */
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;  /* 4:4:4:4 */
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1:5:5:5 — the high bit is a binary alpha. */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* Luminance + alpha: high byte alpha, low byte gray. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4:4:4:4 — scale each nibble up to 0..255. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,
          0xff000000))
        {
          /* RGBA byte order. */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* BGRA byte order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/* Decode an uncompressed RGBA surface, then decode or skip its mipmap
   chain. */
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image
*image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D D S I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDDSImage() reads a DirectDraw Surface image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }

  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /*
    Determine pixel format and pick the matching decoder.
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            case DXGI_FORMAT_BC7_UNORM:
            case DXGI_FORMAT_BC7_UNORM_SRGB:
            {
              alpha_trait = BlendPixelTrait;
              compression = BC7Compression;
              decoder = ReadBC7;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          /* NOTE(review): this assignment is overwritten by the
             unconditional 'num_images = 1' below, so DX10 array sizes
             appear to be ignored -- verify intent before changing. */
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity-check the frame count against the blob size. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header info only, no pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Keep any frames decoded so far; fail only if none succeeded. */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }

  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r D D S I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);

  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);

  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);

  return(MagickImageCoderSignature);
}

/* Remap 16 'source' indices through 'map'; a map entry of -1 selects
   index 3 (continues beyond this view). */
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] =
source[map[i]]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteDDSImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. 
% */ static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const 
DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); 
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) 
unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for (i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = 
d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const DDSSingleColorLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; 
i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) 
WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { ssize_t x; ssize_t i, y, bx, by; const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, 
rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { const Quantum *p; ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) 
NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if 
(fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) 
memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) 
compression=FOURCC_DXT1; if (image_info->compression == DXT1Compression) compression=FOURCC_DXT1; else if (image_info->compression == NoCompression) pixelFormat=DDPF_RGB; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"dxt5") == 0) compression=FOURCC_DXT5; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } option=GetImageOption(image_info,"dds:raw"); if (IsStringTrue(option) == MagickFalse) WriteDDSInfo(image,pixelFormat,compression,mipmaps); else mipmaps=0; WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
GB_cast_array.c
//------------------------------------------------------------------------------
// GB_cast_array: typecast or copy an array
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Casts an input array Ax to an output array Cx with a different built-in
// type.  Does not handle user-defined types.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#endif

GB_PUBLIC                       // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_cast_array              // typecast an array
(
    GB_void *Cx,                // output array
    const GB_Type_code code1,   // type code for Cx
    GB_void *Ax,                // input array
    const GB_Type_code code2,   // type code for Ax
    const int8_t *restrict Ab,  // bitmap for Ax (may be NULL; entries are
                                // skipped when the bitmap marks them absent)
    const size_t user_size,     // size of Ax and Cx if user-defined
    const int64_t anz,          // number of entries in Cx and Ax
    const int nthreads          // number of threads to use
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    if (anz == 0 || Cx == Ax)
    {
        // if anz is zero: no work to do, and the Ax and Cx pointer may be NULL
        // as well.  If Cx and Ax are aliased, then no copy is needed.
        return ;
    }

    ASSERT (Cx != NULL) ;
    ASSERT (Ax != NULL) ;
    ASSERT (anz > 0) ;
    ASSERT (GB_code_compatible (code1, code2)) ;

    //--------------------------------------------------------------------------
    // typecast the array
    //--------------------------------------------------------------------------

    #ifndef GBCOMPACT

        //----------------------------------------------------------------------
        // define the worker for the switch factory
        //----------------------------------------------------------------------

        // Each worker is a pre-generated identity unary-op kernel,
        // GB_unop_apply__identity_<ztype>_<xtype>.
        #define GB_unop_apply(zname,xname)                                  \
            GB (_unop_apply__identity ## zname ## xname)

        // On success the worker returns from GB_cast_array directly; if the
        // kernel reports anything other than GrB_SUCCESS, control falls
        // through to the generic worker below.
        #define GB_WORKER(ignore1,zname,ztype,xname,xtype)                  \
        {                                                                   \
            GrB_Info info = GB_unop_apply (zname,xname)                     \
                ((ztype *) Cx, (xtype *) Ax, Ab, anz, nthreads) ;           \
            if (info == GrB_SUCCESS) return ;                               \
        }                                                                   \
        break ;

        //----------------------------------------------------------------------
        // launch the switch factory
        //----------------------------------------------------------------------

        // Expands to a nested switch over (code1, code2) that invokes
        // GB_WORKER for every supported built-in type pair.
        #include "GB_2type_factory.c"

    #endif

    //--------------------------------------------------------------------------
    // generic worker
    //--------------------------------------------------------------------------

    // Fallback path (also used when GBCOMPACT disables the factory): cast one
    // entry at a time through a function pointer obtained from the cast
    // factory.
    int64_t csize = GB_code_size (code1, user_size) ;
    int64_t asize = GB_code_size (code2, user_size) ;
    GB_cast_function cast_A_to_C = GB_cast_factory (code1, code2) ;

    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        // Cx [p] = Ax [p]
        cast_A_to_C (Cx +(p*csize), Ax +(p*asize), asize) ;
    }
}
Euclid_apply.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_Euclid.h"
/* #include "Euclid_dh.h" */
/* #include "Mat_dh.h" */
/* #include "Factor_dh.h" */
/* #include "Parser_dh.h" */
/* #include "TimeLog_dh.h" */
/* #include "SubdomainGraph_dh.h" */

static void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs);
static void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);
static void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT);

#undef __FUNC__
#define __FUNC__ "Euclid_dhApply"
/* Apply the Euclid preconditioner: compute lhs from rhs via the factored
   triangular solves, permuting and scaling the right-hand side as needed.
   The input "rhs" vector is not modified.  Also accumulates per-context
   timing and iteration counters. */
void Euclid_dhApply(Euclid_dh ctx, HYPRE_Real *rhs, HYPRE_Real *lhs)
{
  START_FUNC_DH
  HYPRE_Real *rhs_, *lhs_;
  HYPRE_Real t1, t2;

  t1 = hypre_MPI_Wtime();

  /* default settings; for everything except PILU */
  ctx->from = 0;
  ctx->to = ctx->m;

  /* case 1: no preconditioning -- just copy rhs into lhs */
  if (! strcmp(ctx->algo_ilu, "none") || ! strcmp(ctx->algo_par, "none"))
  {
    HYPRE_Int i, m = ctx->m;
    for (i=0; i<m; ++i) lhs[i] = rhs[i];
    goto END_OF_FUNCTION;
  }

  /*----------------------------------------------------------------
   * permute and scale rhs vector
   *----------------------------------------------------------------*/

  /* permute rhs vector; when a subdomain graph is present, lhs is used as
     scratch for the permuted rhs and ctx->work2 becomes the solve output */
  if (ctx->sg != NULL)
  {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ permute_vec_n2o_private\n"); */
    permute_vec_n2o_private(ctx, rhs, lhs); CHECK_V_ERROR;
    rhs_ = lhs;
    lhs_ = ctx->work2;
  }
  else
  {
    rhs_ = rhs;
    lhs_ = lhs;
  }

  /* scale rhs vector */
  if (ctx->isScaled)
  {
    /* hypre_printf("@@@@@@@@@@@@@@@@@ scale_rhs_private\n"); */
    scale_rhs_private(ctx, rhs_); CHECK_V_ERROR;
  }

  /* note: rhs_ is permuted, scaled; the input, "rhs" vector has
     not been disturbed.
   */

  /*----------------------------------------------------------------
   * big switch to choose the appropriate triangular solve
   *----------------------------------------------------------------*/

  /* sequential and mpi block jacobi cases */
  if (np_dh == 1 || ! strcmp(ctx->algo_par, "bj") )
  {
    Factor_dhSolveSeq(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }

  /* pilu case */
  else
  {
    Factor_dhSolve(rhs_, lhs_, ctx); CHECK_V_ERROR;
  }

  /*----------------------------------------------------------------
   * unpermute lhs vector
   * (note: don't need to unscale, because we were clever)
   *----------------------------------------------------------------*/
  if (ctx->sg != NULL)
  {
    permute_vec_o2n_private(ctx, lhs_, lhs); CHECK_V_ERROR;
  }

END_OF_FUNCTION: ;

  t2 = hypre_MPI_Wtime();
  /* collective timing for triangular solves */
  ctx->timing[TRI_SOLVE_T] += (t2 - t1);

  /* collective timing for setup+krylov+triSolves
     (intent is to time linear solve, but this is at best problematical!)
   */
  ctx->timing[TOTAL_SOLVE_TEMP_T] = t2 - ctx->timing[SOLVE_START_T];

  /* total triangular solve count */
  ctx->its += 1;
  ctx->itsTotal += 1;

  END_FUNC_DH
}


#undef __FUNC__
#define __FUNC__ "scale_rhs_private"
/* Multiply rhs in place by the row-scaling vector ctx->scale (no-op when the
   matrix was not scaled, i.e. ctx->scale is NULL). */
void scale_rhs_private(Euclid_dh ctx, HYPRE_Real *rhs)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  REAL_DH *scale = ctx->scale;

  /* if matrix was scaled, must scale the rhs */
  if (scale != NULL)
  {
#ifdef USING_OPENMP_DH
    /* NOTE(review): this is an orphaned 'omp for' -- it only shares work if
       the caller is inside a parallel region; otherwise it runs
       sequentially.  Confirm that is the intended usage. */
#pragma omp for schedule(static)
#endif
    for (i=0; i<m; ++i)
    {
      rhs[i] *= scale[i];
    }
  }
  END_FUNC_DH
}


#undef __FUNC__
#define __FUNC__ "permute_vec_o2n_private"
/* Gather xIN into xOUT using the old-to-new column permutation of the
   subdomain graph: xOUT[i] = xIN[o2n[i]].  Requires ctx->sg != NULL. */
void permute_vec_o2n_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  HYPRE_Int *o2n = ctx->sg->o2n_col;
  for (i=0; i<m; ++i) xOUT[i] = xIN[o2n[i]];
  END_FUNC_DH
}


#undef __FUNC__
#define __FUNC__ "permute_vec_n2o_private"
/* Gather xIN into xOUT using the new-to-old row permutation of the
   subdomain graph: xOUT[i] = xIN[n2o[i]].  Requires ctx->sg != NULL. */
void permute_vec_n2o_private(Euclid_dh ctx, HYPRE_Real *xIN, HYPRE_Real *xOUT)
{
  START_FUNC_DH
  HYPRE_Int i, m = ctx->m;
  HYPRE_Int *n2o = ctx->sg->n2o_row;
  for (i=0; i<m; ++i) xOUT[i] = xIN[n2o[i]];
  END_FUNC_DH
}
ast-dump-openmp-declare-variant-extensions-messages.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify %s -x c++ int dummy() { return 1; } #pragma omp declare variant(dummy) match(implementation={extension(match_any,match_all)}, device={kind(cpu, gpu)}) // expected-error {{only a single match extension allowed per OpenMP context selector}} expected-note {{the previous context property 'match_any' used here}} // expected-note {{the ignored property spans until here}} int base1() { return 2; } #pragma omp declare variant(dummy) match(implementation={extension(match_none,match_none)}, device={kind(gpu, fpga)}) // expected-warning {{the context property 'match_none' was used already in the same 'omp declare variant' directive; property ignored}} expected-note {{the previous context property 'match_none' used here}} expected-note {{the ignored property spans until here}} int base2() { return 3; } #pragma omp declare variant(dummy) match(implementation={vendor(pgi), extension(match_none,match_any)}, device={kind(cpu, gpu)}) // expected-error {{only a single match extension allowed per OpenMP context selector}} expected-note {{the previous context property 'match_none' used here}} // expected-note {{the ignored property spans until here}} int base3() { return 4; } int test() { return base1() + base2() + base3(); }
atomic-11.c
/* { dg-do run } */
/* Runtime conformance test for the OpenMP 'atomic' construct: exercises the
   read, write, update, and (statement- and block-form) capture clauses on an
   int and a float, checking the exact value after every operation.  The
   specific pragma/statement pairings are the substance of the test. */

extern void abort (void);
int x = 6;
float y;

int
main (void)
{
  int v;
  float f;

  /* --- read / write / update on an int --- */
  #pragma omp atomic read
  v = x;
  if (v != 6) abort ();
  #pragma omp atomic write
  x = 17;
  #pragma omp atomic read
  v = x;
  if (v != 17) abort ();
  #pragma omp atomic update
  x++;
  #pragma omp atomic read
  v = x;
  if (v != 18) abort ();

  /* --- statement-form capture: post/pre increment --- */
  #pragma omp atomic capture
  v = x++;
  if (v != 18) abort ();
  #pragma omp atomic read
  v = x;
  if (v != 19) abort ();
  #pragma omp atomic capture
  v = ++x;
  if (v != 20) abort ();
  #pragma omp atomic read
  v = x;
  if (v != 20) abort ();

  /* --- block-form capture: capture before or after the update --- */
  #pragma omp atomic capture
  { v = x; x *= 3; }
  if (v != 20) abort ();
  #pragma omp atomic read
  v = x;
  if (v != 60) abort ();
  #pragma omp atomic capture
  { x |= 2; v = x; }
  if (v != 62) abort ();
  #pragma omp atomic read
  v = x;
  if (v != 62) abort ();
  #pragma omp atomic capture
  { v = x; x++; }
  if (v != 62) abort ();
  #pragma omp atomic capture
  { v = x; ++x; }
  if (v != 63) abort ();
  #pragma omp atomic capture
  { ++x; v = x; }
  if (v != 65) abort ();
  /* deliberately unformatted in the original: tests parsing of a compact
     block-form capture */
  #pragma omp atomic capture
  {x++;v=x;}if (v != 66) abort ();
  #pragma omp atomic read
  v = x;
  if (v != 66) abort ();
  #pragma omp atomic capture
  { v = x; x--; }
  if (v != 66) abort ();
  #pragma omp atomic capture
  { v = x; --x; }
  if (v != 65) abort ();
  #pragma omp atomic capture
  { --x; v = x; }
  if (v != 63) abort ();
  #pragma omp atomic capture
  { x--; v = x; }
  if (v != 62) abort ();
  #pragma omp atomic read
  v = x;
  if (v != 62) abort ();

  /* --- same constructs on a float --- */
  #pragma omp atomic write
  y = 17.5f;
  #pragma omp atomic read
  f = y;
  if (f != 17.5) abort ();
  #pragma omp atomic update
  y *= 2.0f;
  #pragma omp atomic read
  f = y;
  /* NOTE(review): this check reads y directly rather than f — harmless in
     this single-threaded test, but confirm it is intentional. */
  if (y != 35.0) abort ();
  #pragma omp atomic capture
  f = y *= 2.0f;
  if (f != 70.0) abort ();
  #pragma omp atomic capture
  f = y++;
  if (f != 70.0) abort ();
  #pragma omp atomic read
  f = y;
  if (f != 71.0) abort ();
  #pragma omp atomic capture
  f = --y;
  if (f != 70.0) abort ();
  #pragma omp atomic read
  f = y;
  if (f != 70.0) abort ();
  #pragma omp atomic capture
  { f = y; y /= 2.0f; }
  if (f != 70.0) abort ();
  #pragma omp atomic read
  f = y;
  if (f != 35.0) abort ();
  #pragma omp atomic capture
  { y /= 2.0f; f = y; }
  if (f != 17.5) abort ();
  #pragma omp atomic read
  f = y;
  if (f != 17.5) abort ();

  return 0;
}
GB_unaryop__one_int8_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_int8_int8 // op(A') function: GB_tran__one_int8_int8 // C type: int8_t // A type: int8_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int8_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_int8_int8 ( int8_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_int8_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
local_sum-inl.h
// This is an example demonstrating the usage of mshadow ps #include <cstdio> // use openmp to launch multiple threads #include <omp.h> #include <mshadow/tensor.h> #include <mshadow-ps/mshadow_ps.h> // simple util to print result void Print_(mshadow::Tensor<mshadow::cpu, 2, float> ts) { for (mshadow::index_t i = 0; i < ts.size(0); ++i) { for (mshadow::index_t j = 0; j < ts.size(1); ++j) { printf("%g ", ts[i][j]); } printf("\n"); } } template<typename xpu> inline void Print(mshadow::Tensor<xpu, 2, float> ts) { mshadow::TensorContainer<mshadow::cpu, 2, float> tmp; tmp.Resize(ts.shape_); mshadow::Copy(tmp, ts); Print_(tmp); } // this function is runed by specific thread template<typename xpu> inline void RunWorkerThread(int devid, mshadow::ps::ISharedModel<xpu, float> *ps) { // initialize tensor engine mshadow::InitTensorEngine<xpu>(devid); mshadow::Stream<xpu> *stream = mshadow::NewStream<xpu>(); // allocate tensor on xpu mshadow::TensorContainer<xpu, 2> data(mshadow::Shape2(2, 3)); // set the computation stream to the new allocated stream // this will make subsequent computation whose target is data // to use the stream, stream is needed for async execution in GPU data.set_stream(stream); // assume these operations sets the content of dataient data[0] = 1.0f; data[1] = devid + data[0]; printf("dev%d: before sync, data:\n", devid); // use print to show result, do not call // print normally since Copy will block Print(data); printf("====================\n"); // intiaialize the key, register the shape on parameter server ps->InitKey(data[0].shape_, 0, devid); ps->InitKey(data[1].shape_, 1, devid); // push data[0] out, for update, or aggregation // 0 is the key of the data, devid is the current device id ps->Push(data[0], 0, devid); // pull request is used to request the data to be copied back // once computation is done ps->PullReq(data[0], 0, devid); // computation can be done here.. 
// the pull request handler will be overlapped with // similar as previous call ps->Push(data[1], 1, devid); ps->PullReq(data[1], 1, devid); // more computation can be done here... // the computation will be overlapped // PullWait will block until these request finishes ps->PullWait(0, devid); ps->PullWait(1, devid); printf("dev%d: after sync, data:\n", devid); // use print to show result, do not call // print normally since Copy will block Print(data); printf("====================\n"); mshadow::DeleteStream(stream); mshadow::ShutdownTensorEngine<xpu>(); } namespace mshadow { namespace ps { // model updater is used when update is happening on server side // if we only use parameter server for sum aggregation // this is not needed, but we must declare this function to return NULL template<> IModelUpdater<float> *CreateModelUpdater(void) { return NULL; } } } template<typename xpu> inline int Run(int argc, char *argv[]) { if (argc < 2) { printf("Usage: device list\n"\ "\tfor CPU the device list can be arbitrary\n"\ "\tfor GPU the device list need to be actual device index\n"); return 0; } #if MSHADOW_RABIT_PS rabit::Init(argc, argv); #endif // list of device ids std::vector<int> devs; // initialization for (int i = 1; i < argc; ++i) { // record the device id devs.push_back(atoi(argv[i])); } mshadow::ps::ISharedModel<xpu, float> *ps = mshadow::ps::CreateSharedModel<xpu, float>("local"); // intiaialize the ps ps->Init(devs); // use openmp to launch #devs threads #pragma omp parallel num_threads(devs.size()) { int tid = omp_get_thread_num(); RunWorkerThread<xpu>(devs[tid], ps); } delete ps; #if MSHADOW_RABIT_PS rabit::Finalize(); #endif return 0; }
sldtestunit.c
#include <stdio.h> // printf #include <inttypes.h> // uint64_t and int64_t vars displaying on printf // currently unused #include <stdlib.h> // atoi atof #include <math.h> #include "sldouble.h" #ifdef _WIN32 #include <Windows.h> int gettimeofday(struct timeval * tp, struct timezone * tzp) { static const uint64_t epoch = (uint64_t) 116444736000000000; uint64_t time; static SYSTEMTIME st; static FILETIME ft; GetSystemTime(&st); SystemTimeToFileTime(&st, &ft); time = (uint64_t) ft.dwLowDateTime; time += ((uint64_t) ft.dwHighDateTime) << 32; tp->tv_sec = (long) ((time - epoch) / 10000000); /* Despite the fact that time in FILETIME struct is measured * in 1/10 microseconds, 4 least significant decimal digits * are always returns with zero value, so we are * using milliseconds multiplied by 1000 * only for conformity with relative POSIX function */ tp->tv_usec = (long) (st.wMilliseconds * 1000); return 0; } #else #include <sys/time.h> #endif void test_mult(int acc); void test_sqrt(int acc); void test_pow(int acc); void test_speed_soft(void); void test_speed_fpu(void); double get_d_pseudo_random(int i); int strcmp_max(char *s1, char *s2, int max); int main(int argc, char **argv) { char argserror[] = "Usage:\n" "-------\n" "./sldmult mult | sqrt | fp | ip | div | pow OPTIONS\n\n" "OPTIONS:\n" "div: dividend(double) divisor(double)\n" " dividend divisor - returns quotient of input\n\n" "fp: number(double) power(double)\n" " number power - returns number raised to fraction part of given power\n\n" "ip: number(double) power(double)\n" " number power - returns number raised to integer part of given power\n\n" "mult: -b [precision] | factor1(double) factor2(double) | -f | -s\n" " -b - big accuracy test of multiplication with default precision\n" " 12 digits after decimal point or with\n" " selected precision but not greater than 16\n" " factor1 factor2 - multiplication of 2 selected numbers\n" " -f - fpu multiplication speed test (it's here for historical reasons)\n" " -s - 
soft multiplication speed test (it's here for historical reasons)\n\n" "pow: -b [precision] | number(double) power(double)\n" " -b - big accuracy test of raising number to given power \n" " with default precision 12 digits after decimal point\n" " or with selected precision but not greater than 16\n" " number power - returns number raised to the given power\n\n" "sqrt: -b [precision] | number(double)\n" " -b - big accuracy test of square root with default precision 12 digits\n" " after decimal point or with selected precision\n" " but not greater than 16\n" " number - selected number for square root test\n\n" "P.S.: All big tests are include 64,000,000 operations for fpu " "and just as much for soft. Also, if you wish to see where result " "between fpu and sldouble calculations would be different you need " "to run one of the big tests with precision 16 - that is now " "the only precision by which you realy see that. And even with it, " "the total accuracy is greater that 99,99% in all tests.\n\n" "P.P.S.: Currently the only functions that have checkings on input " "values are mult, sqrt and pow. 
They properly hold any legal " "double value in contrast to fp, ip and div"; double a,b,acc; struct timeval tim1, tim2; gettimeofday(&tim1,NULL); if (argc < 2) { printf("%s", argserror); return 1; } else { if (!strcmp_max("mult", *++argv, 5)) { if (argc > 2) { if (!strcmp_max("-f", *++argv, 3)) test_speed_fpu(); else if (!strcmp_max("-s", *argv, 3)) test_speed_soft(); else if (!strcmp_max("-b", *argv, 3)){ if (argc > 3 && (acc = atoi(*++argv))) test_mult(acc); else test_mult(12); } else if (argc > 3) { a = atof(*argv); b = atof(*++argv); printf("sldout: %.16e\nfpuout: %.16e\n", mult_by_sd(a, b), a*b); return 0; } else { printf("%s", argserror); return 1; } } else { printf("%s", argserror); return 1; } } else if (!strcmp_max("sqrt", *argv, 5)) { if (argc > 2) { if (!strcmp_max("-b", *++argv, 3)){ if (argc > 3 && (acc = atoi(*++argv))) test_sqrt(acc); else test_sqrt(12); } else { a = atof(*argv); printf("sldout: %.16e\nfpuout: %.16e\n", sqrt_by_sd(a), sqrt(a)); return 0; } } else { printf("%s", argserror); return 1; } } else if (!strcmp_max("fp", *argv, 3)) { if (argc > 2) { if (argc > 3) { a = atof(*++argv); b = atof(*++argv); printf("sldout: %.16e\nfpuout: %.16e\n", fract_power_by_sd(a, b), pow(a,b)); return 0; } else { printf("%s", argserror); return 1; } } else { printf("%s", argserror); return 1; } } else if (!strcmp_max("ip", *argv, 3)) { if (argc > 2) { if (argc > 3) { a = atof(*++argv); b = atof(*++argv); printf("sldout: %.16e\nfpuout: %.16e\n", int_power_by_sd(a, b), pow(a,b)); return 0; } else { printf("%s", argserror); return 1; } } else { printf("%s", argserror); return 1; } } else if (!strcmp_max("div", *argv, 3)) { if (argc > 2) { if (argc > 3) { a = atof(*++argv); b = atof(*++argv); printf("sldout: %.16e\nfpuout: %.16e\n", division_by_sd(a, b), a/b); return 0; } else { printf("%s", argserror); return 1; } } else { printf("%s", argserror); return 1; } } else if (!strcmp_max("pow", *argv, 3)) { if (argc > 2) { if (!strcmp_max("-b", *++argv, 3)){ if (argc 
> 3 && (acc = atoi(*++argv))) test_pow(acc); else test_pow(12); } else if (argc > 3) { a = atof(*argv); b = atof(*++argv); printf("sldout: %.16e\nfpuout: %.16e\n", pow_by_sd(a, b), pow(a,b)); return 0; } else { printf("%s", argserror); return 1; } } else { printf("%s", argserror); return 1; } } } gettimeofday(&tim2,NULL); printf("Time elapsed: %ldns\n", tim2.tv_usec - tim1.tv_usec + (tim2.tv_sec - tim1.tv_sec) * 1000000); } void test_mult(int acc) { if (acc > 16) acc = 16; double accuracy = 1.0; for (int z = 0; z < acc; z++, accuracy /= 10.0); int counter = 0, r = 0; double factor1 = 1.0e+200, factor2; double d1 = 0.0, d2 = 0.0, r1 = 0.0, r2 = 0.0; for (int i = 0; i < 400; ++i) { factor1 /= 10; printf("***\n%.2e first random factor\n\n", factor1); factor2 = 1.0e+200; for (int j = 0; j < 400; ++j) { #pragma omp parallel for \ reduction(+:counter) \ firstprivate(d1) firstprivate(d2) \ firstprivate(r1) firstprivate(r2) firstprivate(r) for (int z = 0; z < 400; ++z) { d1 = get_d_pseudo_random(z % 16) * factor1; d2 = get_d_pseudo_random((j+z+31) % 16) * factor2; if (z % 2 == 0) d1 = -d1; if (z % 3 == 0) d2 = -d2; r1 = mult_by_sd(d1, d2); r2 = d1*d2; if (((r1 > 0 && r1 < r2) && ((r1 + r1 * accuracy) < r2)) || ((r1 < 0 && r1 > r2) && ((r1 + r1 * accuracy) > r2)) || ((r1 > 0 && r1 > r2) && ((r2 + r2 * accuracy) < r1)) || ((r1 < 0 && r1 < r2) && ((r2 + r2 * accuracy) > r1))) r = 1; else r = 0; if (r) { printf("factor1: %.20e factor2: %.20e\nsld: %.20e\nfpu: %.20e\n" "\n------------------------------\n", d1, d2, r1, r2); counter++; } } factor2 /= 10; } } printf("SUMMARY:\n--------\n" "Tests of multiplication two random numbers was made: 64,000,000\n" "Used precision: +-%.2e\n" "Outputs that had missed accuracy: %d\n" "Total accuracy: %.8g%%\n", accuracy, counter, 100 - (counter/64000000.0) * 100); } void test_pow(int acc) { if (acc > 16) acc = 16; double accuracy = 1.0; for (int z = 0; z < acc; z++, accuracy /= 10.0); int counter = 0, r = 0; double factor1 = 1.0e+200, 
factor2; double d1 = 0.0, d2 = 0.0, r1 = 0.0, r2 = 0.0; for (int i = 0; i < 400; ++i) { factor1 /= 10; printf("***\n%.2e number random factor\n\n", factor1); factor2 = 1.0e+200; for (int j = 0; j < 400; ++j) { #pragma omp parallel for \ reduction(+:counter) \ firstprivate(d1) firstprivate(d2) \ firstprivate(r1) firstprivate(r2) firstprivate(r) for (int z = 0; z < 400; ++z) { d1 = get_d_pseudo_random(z % 16) * factor1; d2 = get_d_pseudo_random((j+z+31) % 16) * factor2; if (z % 2 == 0) d1 = -d1; if (z % 3 == 0) d2 = -d2; r1 = pow_by_sd(d1, d2); r2 = pow(d1,d2); if (((r1 > 0 && r1 < r2) && ((r1 + r1 * accuracy) < r2)) || ((r1 < 0 && r1 > r2) && ((r1 + r1 * accuracy) > r2)) || ((r1 > 0 && r1 > r2) && ((r2 + r2 * accuracy) < r1)) || ((r1 < 0 && r1 < r2) && ((r2 + r2 * accuracy) > r1)) || (r1 != r1 && r2 == r2) || (r1 == r1 && r2 != r2)) r = 1; else r = 0; if (r) { printf("number: %.20e power: %.20e\nsld: %.20e\nfpu: %.20e\n" "\n------------------------------\n", d1, d2, r1, r2); counter++; } } factor2 /= 10; } } printf("SUMMARY:\n--------\n" "Tests of raising random number to random power was made: 64,000,000\n" "Used precision: +-%.2e\n" "Outputs that had missed accuracy: %d\n" "Total accuracy: %.8g%%\n", accuracy, counter, 100 - (counter/64000000.0) * 100); } void test_sqrt(int acc) { if (acc > 16) acc = 16; double accuracy = 1.0; for (int z = 0; z < acc; z++, accuracy /= 10.0); int counter = 0, r = 0; double factor1 = 1.0e+200, factor2; double d = 0.0, r1= 0.0, r2 = 0.0; for (int i = 0; i < 400; ++i) { factor1 /= 10; printf("***\n%.2e first random factor\n\n", factor1); factor2 = 1.0e+200; for (int j = 0; j < 400; ++j) { #pragma omp parallel for \ reduction(+:counter) \ firstprivate(d) firstprivate(r1) \ firstprivate(r2) firstprivate(r) for (int z = 0; z < 400; ++z) { d = get_d_pseudo_random(z % 16) * factor1 * factor2; r1 = sqrt_by_sd(d); r2 = sqrt(d); if (((r1 > 0 && r1 < r2) && ((r1 + r1 * accuracy) < r2)) || ((r1 < 0 && r1 > r2) && ((r1 + r1 * accuracy) > r2)) || 
((r1 > 0 && r1 > r2) && ((r2 + r2 * accuracy) < r1)) || ((r1 < 0 && r1 < r2) && ((r2 + r2 * accuracy) > r1))) r = 1; else r = 0; if (r) { printf("in: %.20e\nsld: %.20e\nfpu: %.20e\n" "\n------------------------------\n", d, r1, r2); counter++; } } factor2 /= 10; } } printf("SUMMARY:\n--------\n" "Tests of extracting square root from random number was made: 64,000,000\n" "Used precision: +-%.2e\n" "Outputs that had missed accuracy: %d\n" "Total accuracy: %.8g%%\n", accuracy, counter, 100 - (counter/64000000.0) * 100); } double get_d_pseudo_random(int i) { static int s = 0; struct timeval tim; gettimeofday(&tim,NULL); s++; double d = tim.tv_usec/1000000.0 * ((s % 100000)+1) * ((i % 65536) + 1) / 6553600000.0; if (d >= 1) return d/2; else return d; } int strcmp_max(char *s1, char *s2, int max) { while (max--) { if (*s1 > *s2) return 1; else if (*s1 < *s2) return -1; else if (*s1 == '\0') return 0; s1++; s2++; } return 0; } /* When profiling with gprof get_d_pseudo_random() function * calls inside test_speed_fpu() are get 66.6% of all time * that is 100ms long and without its expense we have * 33ms of time to compute 800000 products by fpu * (on my test stand, of course) */ void test_speed_fpu(void) { int i, j, r; double factor1 = 1.0e+200, factor2, d1, d2; /* Calculation of res is a workaround for optimizator */ double res = 0.0; for (i = 0; i < 400; i++) { factor1 = factor1 / 10; factor2 = 1.0e+200; for (j = 0; j < 2000; j++) { if (j % 50 == 0) factor2 = factor2 / 10; d1 = get_d_pseudo_random(j % 16) * factor1; d2 = get_d_pseudo_random((i+j+31) % 16) * factor2; if (j % 2 == 0) d1 = -d1; if (j % 3 == 0) d2 = -d2; r = d1*d2; res += r / (i * j + 1); } } printf("%e\n", res); } /* When profiling with gprof get_d_pseudo_random() function * calls inside test_speed_soft() are get 4.2% of all time * that is 600ms long and without its expense we have * 575ms of time to compute 800000 products through sldouble * (on my test stand, of course) */ /* What is intresting, is that 
here computings of * pseudorandom numbers were 2.5 times faster that * they were in the test_speed_fpu() case. Probably some of the * executions were going natively in parallel. */ void test_speed_soft(void) { int i, j, r; double factor1 = 1.0e+200, factor2, d1, d2; /* Calculation of res is a workaround for optimizator */ double res = 0.0; for (i = 0; i < 400; i++) { factor1 = factor1 / 10; factor2 = 1.0e+200; for (j = 0; j < 2000; j++) { if (j % 50 == 0) factor2 = factor2 / 10; d1 = get_d_pseudo_random(j % 16) * factor1; d2 = get_d_pseudo_random((i+j+31) % 16) * factor2; if (j % 2 == 0) d1 = -d1; if (j % 3 == 0) d2 = -d2; r = mult_by_sd(d1, d2); res += r / (i * j + 1); } } printf("%e\n", res); }
d2d_memcpy.c
// Device-to-device transfer test: fills a buffer on one target device,
// copies it to another device with omp_target_memcpy, then verifies the
// contents on the host.  The RUN lines and the trailing CHECK line are
// lit/FileCheck directives and must be kept verbatim.
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-aarch64-unknown-linux-gnu | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64-ibm-linux-gnu | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-powerpc64le-ibm-linux-gnu | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env OMP_MAX_ACTIVE_LEVELS=2 %libomptarget-run-x86_64-pc-linux-gnu | %fcheck-x86_64-pc-linux-gnu -allow-empty

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

// Sentinel value written on the source device; the host expects to read
// 2 * magic_num after the destination-device kernel adds magic_num again.
const int magic_num = 7;

int main(int argc, char *argv[]) {
  const int N = 128;
  const int num_devices = omp_get_num_devices();

  // No target device, just return (still prints PASS for FileCheck).
  if (num_devices == 0) {
    printf("PASS\n");
    return 0;
  }

  const int src_device = 0;
  int dst_device = 1;
  // With a single device, source and destination collapse to the same one.
  if (dst_device >= num_devices)
    dst_device = num_devices - 1;

  int length = N * sizeof(int);
  int *src_ptr = omp_target_alloc(length, src_device);
  int *dst_ptr = omp_target_alloc(length, dst_device);
  assert(src_ptr && "src_ptr is NULL");
  assert(dst_ptr && "dst_ptr is NULL");

  // Initialize the source buffer on the source device.
#pragma omp target teams distribute parallel for device(src_device) \
    is_device_ptr(src_ptr)
  for (int i = 0; i < N; ++i) {
    src_ptr[i] = magic_num;
  }

  // Device-to-device copy (both offsets zero, dst then src device ids).
  int rc = omp_target_memcpy(dst_ptr, src_ptr, length, 0, 0, dst_device,
                             src_device);
  assert(rc == 0 && "error in omp_target_memcpy");

  int *buffer = malloc(length);
  assert(buffer && "failed to allocate host buffer");

  // Read back via the destination device, adding magic_num once more so the
  // host check distinguishes a copied buffer from an untouched one.
#pragma omp target teams distribute parallel for device(dst_device) \
    map(from: buffer[0:N]) is_device_ptr(dst_ptr)
  for (int i = 0; i < N; ++i) {
    buffer[i] = dst_ptr[i] + magic_num;
  }

  for (int i = 0; i < N; ++i)
    assert(buffer[i] == 2 * magic_num);

  printf("PASS\n");

  // Free host and device memory
  free(buffer);
  omp_target_free(src_ptr, src_device);
  omp_target_free(dst_ptr, dst_device);

  return 0;
}

// CHECK: PASS
render.h
/* Copyright (c) 2014 hole This software is released under the MIT License (http://kagamin.net/hole/license.txt). A part of this software is based on smallpt (http://www.kevinbeason.com/smallpt/) and released under the MIT License (http://kagamin.net/hole/smallpt-license.txt). */ #ifndef _RENDER_H_ #define _RENDER_H_ // #include <iostream> #if defined(_TWIN32) #define ARCH_WIN32 (1) #define ARCH_UEFI (0) #elif defined(_TUEFI) #define ARCH_WIN32 (0) #define ARCH_UEFI (1) #else #error define something. #endif #define fabs __builtin_fabs void entryPoint(); #if ARCH_WIN32 extern "C" { __declspec(dllimport) void* __stdcall GlobalAlloc(unsigned int dwFlags, unsigned int dwBytes); __declspec(dllimport) void ExitProcess(int); __declspec(dllimport) void OutputDebugStringW(const wchar_t*); } void WinMainCRTStartup() { entryPoint(); } #else #include "efi.h" void ConcatChar(wchar_t* s, wchar_t v); void ConcatDigit(wchar_t* s, int v, int width); void OutputDebugStringW(const wchar_t* s); EFI_STATUS _DllMainCRTStartup(EFI_HANDLE ih, EFI_SYSTEM_TABLE *st) { gSt = st; EFI_GUID guid = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID; st->BootServices->LocateProtocol(&guid, nullptr, (void**)&gp); st->BootServices->SetWatchdogTimer(0, 0, 0, nullptr); st->ConOut->ClearScreen(st->ConOut); st->ConOut->OutputString(st->ConOut, (CHAR16*)L"Booted.\r\n"); EFI_TIME startedTime; st->RuntimeServices->GetTime(&startedTime, nullptr); entryPoint(); EFI_TIME stoppedTime; st->RuntimeServices->GetTime(&stoppedTime, nullptr); { wchar_t s[128] = {}; auto duration = 0; auto b = startedTime.Hour*36000+startedTime.Minute*600+startedTime.Second*10+(startedTime.Nanosecond/1000)/100; auto e = stoppedTime.Hour*36000+stoppedTime.Minute*600+stoppedTime.Second*10+(stoppedTime.Nanosecond/1000)/100; auto d = e - b; auto sec = stoppedTime.Second - startedTime.Second; auto ms = (stoppedTime.Nanosecond - startedTime.Nanosecond) / 1000; ConcatDigit(s, d/600, 2); d = d % 600; ConcatChar(s, L':'); ConcatDigit(s, d/10, 2); d = d % 10; 
ConcatChar(s, L'.'); ConcatDigit(s, d, 1); OutputDebugStringW(s); } //TODO: Implement shutdown. while(1){}; return 0; } void ConcatChar(wchar_t* s, wchar_t v) { auto* headPtr = s; while(*headPtr){ ++headPtr; } *headPtr++ = v; *headPtr++ = L'\0'; } void ConcatDigit(wchar_t* s, int v, int width) { auto* headPtr = s; while(*headPtr){ ++headPtr; } int start = 1; for(int i=1; i<width; ++i){ start *= 10; } bool hit = true;// for(int i=start; i>0; i/=10){ int d = v/i; if(d){ *headPtr++ = L'0' + d; hit = true; }else{ if(hit){ *headPtr++ = L'0'; } } v -= d*i; if(v==0){ break; } } *headPtr++ = L'\0'; } void OutputDebugStringW(const wchar_t* s) { gSt->ConOut->OutputString(gSt->ConOut, (CHAR16*)s); } void ExitProcess(int) { OutputDebugStringW(L"ExitProcess.\r\n"); } void* GlobalAlloc(unsigned int, unsigned int size) { void* p = nullptr; auto ret = gSt->BootServices->AllocatePool(EfiLoaderData, size, &p); OutputDebugStringW(L"Allocated memory.\r\n"); if(ret != 0){ OutputDebugStringW(L"Memory allocation failed.\r\n"); while(1){} } return p; } #endif extern "C" { float sqrt(float x) { union { int i; float x; } u; u.x = x; u.i = (1<<29) + (u.i >> 1) - (1<<22); u.x += x/u.x; u.x = 0.25f*u.x + x/u.x; return u.x; } float sin(float x) { return x - (1.0f/(6.0f)) * x*x*x + (1.0f/(6.0f*20.0f)) * x*x*x*x*x - (1.0f/(6.0f*20.0f*43.0f)) * x*x*x*x*x*x*x + (1.0f/(6.0f*20.0f*43.0f*72.0f)) * x*x*x*x*x*x*x*x*x - (1.0f/(6.0f*20.0f*43.0f*72.0f*121.0f)) * x*x*x*x*x*x*x*x*x*x*x; } float cos(float x) { return 1.0f - (0.5f) * x*x + (1.0f/(24.0f)) * x*x*x*x - (1.0f/(24.0f*30.0f)) * x*x*x*x*x*x + (1.0f/(24.0f*30.0f*56.0f)) * x*x*x*x*x*x*x*x - (1.0f/(24.0f*30.0f*56.0f*90.0f)) * x*x*x*x*x*x*x*x*x*x + (1.0f/(24.0f*30.0f*56.0f*90.0f*132.0f)) * x*x*x*x*x*x*x*x*x*x*x*x; } float pow(float x, float s) { float r = x; for(float i=0; i< s; i+=1.0f){ x *= x; } return r; } void* memcpy(void* dst, const void* src, unsigned long long sz) { unsigned char* r = (unsigned char*)src; unsigned char* w = (unsigned char*)dst; 
while(sz){ *w = *r; ++w; ++r; --sz; } return dst; } void* allocate(unsigned long long sz); } extern void* operator new[](unsigned long long s) { return allocate(s); } #include "radiance.h" #include "ppm.h" #include "random.h" int tov(float v) { return (int)(edupt::clamp(v) * 255 + 0.5f); } void display(const edupt::Color* image, int startHeight, int endHeight, int width, int height) { bool flip = false; if(startHeight > endHeight){ auto tmp = startHeight; startHeight = endHeight; endHeight = tmp; flip = true; } endHeight = endHeight > height ? height : endHeight; #if ARCH_UEFI auto* headPtr = (unsigned char*)gp->Mode->FrameBufferBase; auto* currentPtr = headPtr; const auto vr = gp->Mode->Info->VerticalResolution; const auto hr = gp->Mode->Info->HorizontalResolution; currentPtr += startHeight * hr * 4; if(!flip) { for(int y=startHeight; y<vr; ++y) { if(y>=endHeight){ break; } for(int x=0; x<hr; ++x){ if(x>=width){ currentPtr += (hr-width)*4; break; } const int image_index = (height - y - 1) * width + x; const auto& pixel = image[image_index]; int v = x > 255 ? 255 : x; *currentPtr++ = tov(pixel.z); *currentPtr++ = tov(pixel.y); *currentPtr++ = tov(pixel.x); *currentPtr++ = 255; } } }else{ for(int y=vr-1; y>=startHeight; --y) { if(y>=endHeight){ continue; } for(int x=0; x<hr; ++x){ if(x>=width){ currentPtr += (hr-width)*4; break; } const int image_index = (height - y - 1) * width + x; const auto& pixel = image[image_index]; int v = x > 255 ? 
255 : x; *currentPtr++ = tov(pixel.z); *currentPtr++ = tov(pixel.y); *currentPtr++ = tov(pixel.x); *currentPtr++ = 255; } } } #endif } namespace edupt { int render(const int width, const int height, const int samples, const int supersamples) { // カメラ位置 const Vec camera_position = Vec(50.0, 52.0, 220.0); const Vec camera_dir = normalize(Vec(0.0, -0.04, -1.0)); const Vec camera_up = Vec(0.0, 1.0, 0.0); // ワールド座標系でのスクリーンの大きさ const double screen_width = 30.0 * width / height; const double screen_height= 30.0; // スクリーンまでの距離 const double screen_dist = 40.0; // スクリーンを張るベクトル const Vec screen_x = normalize(cross(camera_dir, camera_up)) * screen_width; const Vec screen_y = normalize(cross(screen_x, camera_dir)) * screen_height; const Vec screen_center = camera_position + camera_dir * screen_dist; Color *image = new Color[width * height]; OutputDebugStringW(L"Rendering..."); // std::cout << width << "x" << height << " " << samples * (supersamples * supersamples) << " spp" << std::endl; // OpenMP // #pragma omp parallel for schedule(dynamic, 1) num_threads(4) for (int y = 0; y < height; y ++) { #if 0 #if ARCH_UEFI gSt->ConOut->ClearScreen(gSt->ConOut); wchar_t s[64] = {}; memcpy(s, L"Rendering... 
", 13*sizeof(wchar_t)); ConcatDigit(s, 100.0 * y / (height - 1), 2); ConcatChar(s, '%'); OutputDebugStringW(s); #else OutputDebugStringW(L"Rendering %\r\n"); #endif #endif // std::cerr << "Rendering (y = " << y << ") " << (100.0 * y / (height - 1)) << "%" << std::endl; // display(image, y-1, y, width, height); Random rnd(y + 1); for (int x = 0; x < width; x ++) { const int image_index = (height - y - 1) * width + x; // supersamples x supersamples のスーパーサンプリング for (int sy = 0; sy < supersamples; sy ++) { for (int sx = 0; sx < supersamples; sx ++) { Color accumulated_radiance = Color(); // 一つのサブピクセルあたりsamples回サンプリングする for (int s = 0; s < samples; s ++) { const double rate = (1.0 / supersamples); const double r1 = sx * rate + rate / 2.0; const double r2 = sy * rate + rate / 2.0; // スクリーン上の位置 const Vec screen_position = screen_center + screen_x * ((r1 + x) / width - 0.5) + screen_y * ((r2 + y) / height- 0.5); // レイを飛ばす方向 const Vec dir = normalize(screen_position - camera_position); accumulated_radiance = accumulated_radiance + radiance(Ray(camera_position, dir), &rnd, 0) / samples / (supersamples * supersamples); } image[image_index] = image[image_index] + accumulated_radiance; } } } } OutputDebugStringW(L"Finished.\r\n"); // 出力 // save_ppm_file(std::string("image.ppm"), image, width, height); display(image, height, 0, width, height); return 0; } }; #endif
app.c
/**
 * SpMV (sparse matrix - dense vector multiplication) host application for
 * UPMEM DPUs, using a doubly-blocked COO (DBCOO) matrix layout.
 *
 * Christina Giannoula
 * cgiannoula: christina.giann@gmail.com
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>

#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"

// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif

#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB

/*
 * Main Structures:
 * 1. Matrices (A/B/C/D hold successive conversion stages; only A survives to the kernel)
 * 2. Input vector
 * 3. Output vector
 * 4. Help structures for data partitioning
 */
static struct DBCOOMatrix* A;
static struct DBCSRMatrix* B;
static struct DCSRMatrix* C;
static struct COOMatrix* D;
static val_dt* x;
static val_dt* y;
static struct partition_info_t *part_info;

/**
 * @brief Specific information for each DPU
 */
struct dpu_info_t {
    uint32_t block_rows_per_dpu;   // block rows assigned to this DPU
    uint32_t prev_block_rows_dpu;  // block rows assigned to preceding DPUs
    uint32_t block_start;
    uint32_t blocks;               // non-zero blocks assigned to this DPU
    uint32_t blocks_pad;
    uint32_t merge;
};
struct dpu_info_t *dpu_info;

/**
 * @brief find the dpus_per_vert_partition
 * @param n total number of DPUs to split into partitions
 * @param horz_partitions (output) number of horizontal partitions = n / vert_partitions
 * @param vert_partitions number of vertical partitions
 * NOTE(review): integer division — assumes n is a multiple of vert_partitions; confirm.
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    uint32_t dpus_per_vert_partition = n / vert_partitions;
    *horz_partitions = dpus_per_vert_partition;
}

/**
 * @brief initialize input vector with the repeating pattern 1,2,3,4
 * @param vec pointer to input vector
 * @param size vector size (number of elements)
 */
void init_vector(val_dt* vec, uint32_t size) {
    for(unsigned int i = 0; i < size; ++i) {
        vec[i] = (val_dt) (i%4+1);
    }
}

/**
 * @brief compute output in the host (reference result used by CHECK_CORR)
 *
 * Iterates partitions in row-major order; within each partition walks its
 * non-zero blocks and accumulates a dense row_block_size x col_block_size
 * block product into y.
 */
void spmv_host(val_dt *y, struct DBCOOMatrix *dbcooMtx, val_dt *x) {
    uint64_t total_blocks = 0;  // running offset of this partition's first block in bind/bval
    for (uint32_t r = 0; r < dbcooMtx->horz_partitions; r++) {
        for (uint32_t c = 0; c < dbcooMtx->vert_partitions; c++) {
            uint32_t partition = r * dbcooMtx->vert_partitions + c;
            for(uint64_t n=0; n<dbcooMtx->blocks_per_partition[partition]; n++) {
                // Block coordinates (in block units) within the partition tile
                uint64_t i = dbcooMtx->bind[total_blocks + n].rowind;
                uint64_t j = dbcooMtx->bind[total_blocks + n].colind;
                for(uint64_t blr=0; blr<dbcooMtx->row_block_size; blr++){
                    val_dt acc = 0;
                    for(uint64_t blc=0; blc<dbcooMtx->col_block_size; blc++) {
                        // bval is stored block-by-block in row-major order within each block
                        acc += dbcooMtx->bval[total_blocks * dbcooMtx->row_block_size * dbcooMtx->col_block_size + n * dbcooMtx->col_block_size * dbcooMtx->row_block_size + blr * dbcooMtx->col_block_size + blc] * x[c * dbcooMtx->tile_width + j * dbcooMtx->col_block_size + blc];
                    }
                    y[r * dbcooMtx->tile_height + i * dbcooMtx->row_block_size + blr] += acc;
                }
            }
            total_blocks += dbcooMtx->blocks_per_partition[partition];
        }
    }
}

/**
 * @brief main of the host application
 *
 * Pipeline: read COO matrix -> partition -> convert to DBCOO -> transfer to
 * DPUs -> launch kernel -> retrieve partial results -> merge on host ->
 * (optionally) verify against spmv_host.
 */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);

    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;

    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);

    unsigned int i;

    // Initialize input data: COO -> DCSR -> DBCSR -> DBCOO; each intermediate
    // representation is freed as soon as the next one is built.
    D = readCOOMatrix(p.fileName);
    sortCOOMatrix(D);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    C = coo2dcsr(D, horz_partitions, vert_partitions);
    freeCOOMatrix(D);
    B = dcsr2dbcsr(C, p.row_blsize, p.col_blsize);
    sortDBCSRMatrix(B);
    countNNZperBlockDBCSRMatrix(B);
    freeDCSRMatrix(C);
    A = dbcsr2dbcoo(B);
    freeDBCSRMatrix(B);

    // Initialize partition data
    part_info = partition_init(nr_of_dpus, NR_TASKLETS);

    // Initialize help data - Padding needed
    // Pad sizes so element counts are multiples of an 8-byte MRAM transfer unit.
    uint32_t ncols_pad = A->vert_partitions * A->tile_width + A->col_block_size;
    uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
    uint32_t nrows_pad = A->horz_partitions * A->tile_height + A->row_block_size;
    if (ncols_pad % (8 / byte_dt) != 0)
        ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
    if (tile_width_pad % (8 / byte_dt) != 0)
        tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
    if (tile_width_pad % 2 != 0)
        tile_width_pad++;
#endif
    if (nrows_pad % (8 / byte_dt) != 0)
        nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));

    // Allocate input vector
    x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));

    // Initialize input vector with arbitrary data
    init_vector(x, ncols_pad);

    // Initialize help data
    // NOTE(review): dpu_info and input_args are never freed before exit.
    dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
    dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
    // Max limits for parallel transfers (all DPUs transfer the max size so one
    // bulk xfer can be used; smaller DPUs carry padding)
    uint64_t max_block_rows_per_dpu = 0;
    uint64_t max_blocks_per_dpu = 0;

    // Timer for measurements
    Timer timer;

    i = 0;
    uint32_t total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        // Find padding for block rows and non-zero elements needed for CPU-DPU transfers
        uint64_t block_rows_per_dpu = A->num_block_rows;
        uint64_t prev_block_rows_dpu = 0;
        if (block_rows_per_dpu > max_block_rows_per_dpu)
            max_block_rows_per_dpu = block_rows_per_dpu;

        unsigned int blocks;
        blocks = A->blocks_per_partition[i];  // DPU i handles partition i
        if (blocks > max_blocks_per_dpu)
            max_blocks_per_dpu = blocks;

        // Keep information per DPU
        dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
        dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
        dpu_info[i].blocks = blocks;

        // Find input arguments per DPU
        input_args[i].block_rows = block_rows_per_dpu;
        input_args[i].start_block_row = 0;
        input_args[i].tcols = tile_width_pad;
        input_args[i].row_block_size = A->row_block_size;
        input_args[i].col_block_size = A->col_block_size;
        //input_args[i].blocks = blocks;

#if BLNC_TSKLT_BLOCK
        // Load-balance blocks across tasklets
        partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#else
        // Load-balance nnzs across tasklets
        partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, total_blocks);
#endif

        uint32_t t;
        for (t = 0; t < NR_TASKLETS; t++) {
            // Find input arguments per DPU: tasklet t covers the half-open
            // block range [split[t], split[t+1])
            input_args[i].start_block[t] = part_info->block_split_tasklet[i * (NR_TASKLETS+2) + t];
            input_args[i].blocks_per_tasklet[t] = part_info->block_split_tasklet[i * (NR_TASKLETS+2) + (t+1)] - part_info->block_split_tasklet[i * (NR_TASKLETS+2) + t];
        }
        total_blocks += A->blocks_per_partition[i];
    }

    // Initialization for parallel transfers (even counts keep transfers 8-byte aligned)
#if INT8
    if (max_block_rows_per_dpu % 2 != 0)
        max_block_rows_per_dpu++;
#endif
    if (max_blocks_per_dpu % 2 != 0)
        max_blocks_per_dpu++;

    // Re-allocations for padding needed
    A->bind = (struct bind_t *) realloc(A->bind, (max_blocks_per_dpu * nr_of_dpus * sizeof(struct bind_t)));
    A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt)));
    y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt));

    // Count total number of bytes to be transfered in MRAM of DPU
    // MRAM layout per DPU: [output y][input x tile][bind][bval]
    unsigned long int total_bytes;
    total_bytes = (max_blocks_per_dpu * sizeof(struct bind_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt));
    assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size");

    // Copy input arguments to DPUs
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        input_args[i].max_block_rows = max_block_rows_per_dpu;
        input_args[i].max_blocks = max_blocks_per_dpu;
        DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT));

    // Copy input matrix to DPUs
    startTimer(&timer, 0);
    // Copy Browind + Bcolind (block coordinates); each DPU gets its partition's slice
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bind + total_blocks));
        total_blocks += A->blocks_per_partition[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt), max_blocks_per_dpu * sizeof(struct bind_t), DPU_XFER_DEFAULT));

    // Copy Values
    i = 0;
    total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size)));
        total_blocks += A->blocks_per_partition[i];
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_blocks_per_dpu * sizeof(struct bind_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 0);

    // Copy input vector to DPUs: DPU i works on vertical tile (i % vert_partitions)
    startTimer(&timer, 1);
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        uint32_t tile_vert_indx = i % A->vert_partitions;
        DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 1);

    // Run kernel on DPUs
    startTimer(&timer, 2);
    DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS));
    stopTimer(&timer, 2);

#if LOG
    // Display DPU Log (default: disabled)
    DPU_FOREACH(dpu_set, dpu) {
        DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout));
    }
#endif

    // Retrieve results for output vector from DPUs (one partial y region per DPU)
    startTimer(&timer, 3);
    i = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size)));
    }
    DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT));
    stopTimer(&timer, 3);

    // Merge partial results to the host CPU: for each horizontal partition,
    // sum the vert_partitions partial vectors into column 0's region.
    // NOTE(review): only the first tile_height entries of each region are
    // merged — assumes tile_height covers all valid rows per DPU; confirm.
    startTimer(&timer, 4);
    uint32_t r, c, t;
#pragma omp parallel for num_threads(p.nthreads) shared(A, y, max_block_rows_per_dpu) private(r,c,t) collapse(2)
    for (r = 0; r < A->horz_partitions; r++) {
        for (t = 0; t < A->tile_height; t++) {
            for (c = 1; c < A->vert_partitions; c++) {
                y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + t] += y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + c * max_block_rows_per_dpu * A->row_block_size + t];
            }
        }
    }
    stopTimer(&timer, 4);

    // Print timing results
    printf("\n");
    printf("Load Matrix ");
    printTimer(&timer, 0);
    printf("Load Input Vector ");
    printTimer(&timer, 1);
    printf("Kernel ");
    printTimer(&timer, 2);
    printf("Retrieve Output Vector ");
    printTimer(&timer, 3);
    printf("Merge Partial Results ");
    printTimer(&timer, 4);
    printf("\n\n");

#if CHECK_CORR
    // Check output against the sequential host reference (rows beyond A->nrows
    // are padding and skipped)
    val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    spmv_host(y_host, A, x);
    bool status = true;
    i = 0;
    for (uint32_t r = 0; r < A->horz_partitions; r++) {
        for (uint32_t t = 0; t < A->tile_height; t++) {
            if((r * A->tile_height + t < A->nrows) && y_host[i] != y[r * A->vert_partitions * max_block_rows_per_dpu * A->row_block_size + t]) {
                status = false;
            }
            i++;
        }
    }
    if (status) {
        printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n");
    } else {
        printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n");
    }
    free(y_host);
#endif

    // Deallocation
    freeDBCOOMatrix(A);
    free(x);
    free(y);
    partition_free(part_info);
    DPU_ASSERT(dpu_free(dpu_set));

    return 0;
}
gptl_papi.c
#if ( defined HAVE_PAPI )
#include <papi.h>
#endif
#include <stdlib.h>
#if ( defined THREADED_OMP )
#include <omp.h>
#elif ( defined THREADED_PTHREADS )
#include <pthread.h>
#endif
#include "private.h"

/* One row of the PAPI-counter lookup table below. */
typedef struct {
  int counter;  /* PAPI counter */
  char *prstr;  /* print string for output timers (16 chars) */
  char *str;    /* descriptive print string (more descriptive than prstr) */
} Papientry;

#if ( defined HAVE_PAPI )
/* Mapping of PAPI counters to short and long printed strings */
static Papientry papitable [] = {
  {PAPI_L1_DCM, "L1 Dcache miss ", "Level 1 data cache misses"},
  {PAPI_L1_ICM, "L1 Icache miss ", "Level 1 instruction cache misses"},
  {PAPI_L2_DCM, "L2 Dcache miss ", "Level 2 data cache misses"},
  {PAPI_L2_ICM, "L2 Icache miss ", "Level 2 instruction cache misses"},
  {PAPI_L3_DCM, "L3 Dcache miss ", "Level 3 data cache misses"},
  {PAPI_L3_ICM, "L3 Icache miss ", "Level 3 instruction cache misses"},
  {PAPI_L1_TCM, "L1 cache miss ", "Level 1 total cache misses"},
  {PAPI_L2_TCM, "L2 cache miss ", "Level 2 total cache misses"},
  {PAPI_L3_TCM, "L3 cache miss ", "Level 3 total cache misses"},
  {PAPI_CA_SNP, "Snoops ", "Snoops "},
  {PAPI_CA_SHR, "PAPI_CA_SHR ", "Request for shared cache line (SMP)"},
  {PAPI_CA_CLN, "PAPI_CA_CLN ", "Request for clean cache line (SMP)"},
  {PAPI_CA_INV, "PAPI_CA_INV ", "Request for cache line Invalidation (SMP)"},
  {PAPI_CA_ITV, "PAPI_CA_ITV ", "Request for cache line Intervention (SMP)"},
  {PAPI_L3_LDM, "L3 load misses ", "Level 3 load misses"},
  {PAPI_L3_STM, "L3 store misses ", "Level 3 store misses"},
  {PAPI_BRU_IDL,"PAPI_BRU_IDL ", "Cycles branch units are idle"},
  {PAPI_FXU_IDL,"PAPI_FXU_IDL ", "Cycles integer units are idle"},
  {PAPI_FPU_IDL,"PAPI_FPU_IDL ", "Cycles floating point units are idle"},
  {PAPI_LSU_IDL,"PAPI_LSU_IDL ", "Cycles load/store units are idle"},
  {PAPI_TLB_DM, "Data TLB misses ", "Data translation lookaside buffer misses"},
  {PAPI_TLB_IM, "Inst TLB misses ", "Instr translation lookaside buffer misses"},
  {PAPI_TLB_TL, "Tot TLB misses ", "Total translation lookaside buffer misses"},
  {PAPI_L1_LDM, "L1 load misses ", "Level 1 load misses"},
  {PAPI_L1_STM, "L1 store misses ", "Level 1 store misses"},
  {PAPI_L2_LDM, "L2 load misses ", "Level 2 load misses"},
  {PAPI_L2_STM, "L2 store misses ", "Level 2 store misses"},
  {PAPI_BTAC_M, "BTAC miss ", "BTAC miss"},
  {PAPI_PRF_DM, "PAPI_PRF_DM ", "Prefetch data instruction caused a miss"},
  {PAPI_L3_DCH, "L3 DCache Hit ", "Level 3 Data Cache Hit"},
  {PAPI_TLB_SD, "PAPI_TLB_SD ", "Xlation lookaside buffer shootdowns (SMP)"},
  {PAPI_CSR_FAL,"PAPI_CSR_FAL ", "Failed store conditional instructions"},
  {PAPI_CSR_SUC,"PAPI_CSR_SUC ", "Successful store conditional instructions"},
  {PAPI_CSR_TOT,"PAPI_CSR_TOT ", "Total store conditional instructions"},
  {PAPI_MEM_SCY,"Cyc Stalled Mem ", "Cycles Stalled Waiting for Memory Access"},
  {PAPI_MEM_RCY,"Cyc Stalled MemR", "Cycles Stalled Waiting for Memory Read"},
  {PAPI_MEM_WCY,"Cyc Stalled MemW", "Cycles Stalled Waiting for Memory Write"},
  {PAPI_STL_ICY,"Cyc no InstrIss ", "Cycles with No Instruction Issue"},
  {PAPI_FUL_ICY,"Cyc Max InstrIss", "Cycles with Maximum Instruction Issue"},
  {PAPI_STL_CCY,"Cyc No InstrComp", "Cycles with No Instruction Completion"},
  {PAPI_FUL_CCY,"Cyc Max InstComp", "Cycles with Maximum Instruction Completion"},
  {PAPI_HW_INT, "HW interrupts ", "Hardware interrupts"},
  {PAPI_BR_UCN, "Uncond br instr ", "Unconditional branch instructions executed"},
  {PAPI_BR_CN, "Cond br instr ex", "Conditional branch instructions executed"},
  {PAPI_BR_TKN, "Cond br instr tk", "Conditional branch instructions taken"},
  {PAPI_BR_NTK, "Cond br instrNtk", "Conditional branch instructions not taken"},
  {PAPI_BR_MSP, "Cond br instrMPR", "Conditional branch instructions mispred"},
  {PAPI_BR_PRC, "Cond br instrCPR", "Conditional branch instructions corr. pred"},
  {PAPI_FMA_INS,"FMA instr comp ", "FMA instructions completed"},
  {PAPI_TOT_IIS,"Total instr iss ", "Total instructions issued"},
  {PAPI_TOT_INS,"Total instr ex ", "Total instructions executed"},
  {PAPI_INT_INS,"Int instr ex ", "Integer instructions executed"},
  {PAPI_FP_INS, "FP instr ex ", "Floating point instructions executed"},
  {PAPI_LD_INS, "Load instr ex ", "Load instructions executed"},
  {PAPI_SR_INS, "Store instr ex ", "Store instructions executed"},
  {PAPI_BR_INS, "br instr ex ", "Total branch instructions executed"},
  {PAPI_VEC_INS,"Vec/SIMD instrEx", "Vector/SIMD instructions executed"},
  {PAPI_RES_STL,"Cyc proc stalled", "Cycles processor is stalled on resource"},
  {PAPI_FP_STAL,"Cyc any FP stall", "Cycles any FP units are stalled"},
  {PAPI_TOT_CYC,"Total cycles ", "Total cycles"},
  {PAPI_LST_INS,"Tot L/S inst ex ", "Total load/store inst. executed"},
  {PAPI_SYC_INS,"Sync. inst. ex ", "Sync. inst. executed"},
  {PAPI_L1_DCH, "L1 D Cache Hit ", "L1 D Cache Hit"},
  {PAPI_L2_DCH, "L2 D Cache Hit ", "L2 D Cache Hit"},
  {PAPI_L1_DCA, "L1 D Cache Acc ", "L1 D Cache Access"},
  {PAPI_L2_DCA, "L2 D Cache Acc ", "L2 D Cache Access"},
  {PAPI_L3_DCA, "L3 D Cache Acc ", "L3 D Cache Access"},
  {PAPI_L1_DCR, "L1 D Cache Read ", "L1 D Cache Read"},
  {PAPI_L2_DCR, "L2 D Cache Read ", "L2 D Cache Read"},
  {PAPI_L3_DCR, "L3 D Cache Read ", "L3 D Cache Read"},
  {PAPI_L1_DCW, "L1 D Cache Write", "L1 D Cache Write"},
  {PAPI_L2_DCW, "L2 D Cache Write", "L2 D Cache Write"},
  {PAPI_L3_DCW, "L3 D Cache Write", "L3 D Cache Write"},
  {PAPI_L1_ICH, "L1 I cache hits ", "L1 instruction cache hits"},
  {PAPI_L2_ICH, "L2 I cache hits ", "L2 instruction cache hits"},
  {PAPI_L3_ICH, "L3 I cache hits ", "L3 instruction cache hits"},
  {PAPI_L1_ICA, "L1 I cache acc ", "L1 instruction cache accesses"},
  {PAPI_L2_ICA, "L2 I cache acc ", "L2 instruction cache accesses"},
  {PAPI_L3_ICA, "L3 I cache acc ", "L3 instruction cache accesses"},
  {PAPI_L1_ICR, "L1 I cache reads", "L1 instruction cache reads"},
  {PAPI_L2_ICR, "L2 I cache reads", "L2 instruction cache reads"},
  {PAPI_L3_ICR, "L3 I cache reads", "L3 instruction cache reads"},
  {PAPI_L1_ICW, "L1 I cache write", "L1 instruction cache writes"},
  {PAPI_L2_ICW, "L2 I cache write", "L2 instruction cache writes"},
  {PAPI_L3_ICW, "L3 I cache write", "L3 instruction cache writes"},
  {PAPI_L1_TCH, "L1 cache hits ", "L1 total cache hits"},
  {PAPI_L2_TCH, "L2 cache hits ", "L2 total cache hits"},
  {PAPI_L3_TCH, "L3 cache hits ", "L3 total cache hits"},
  {PAPI_L1_TCA, "L1 cache access ", "L1 total cache accesses"},
  {PAPI_L2_TCA, "L2 cache access ", "L2 total cache accesses"},
  {PAPI_L3_TCA, "L3 cache access ", "L3 total cache accesses"},
  {PAPI_L1_TCR, "L1 cache reads ", "L1 total cache reads"},
  {PAPI_L2_TCR, "L2 cache reads ", "L2 total cache reads"},
  {PAPI_L3_TCR, "L3 cache reads ", "L3 total cache reads"},
  {PAPI_L1_TCW, "L1 cache writes ", "L1 total cache writes"},
  {PAPI_L2_TCW, "L2 cache writes ", "L2 total cache writes"},
  {PAPI_L3_TCW, "L3 cache writes ", "L3 total cache writes"},
  {PAPI_FML_INS,"FM ins ", "FM ins"},
  {PAPI_FAD_INS,"FA ins ", "FA ins"},
  {PAPI_FDV_INS,"FD ins ", "FD ins"},
  {PAPI_FSQ_INS,"FSq ins ", "FSq ins"},
  {PAPI_FNV_INS,"Finv ins ", "Finv ins"},
  {PAPI_FP_OPS, "FP ops executed ", "Floating point operations executed"}};

static const int nentries = sizeof (papitable) / sizeof (Papientry);
static Papientry eventlist[MAX_AUX];     /* list of PAPI events to be counted */
static Papientry propeventlist[MAX_AUX]; /* list of PAPI events hoped to be counted */
static int nevents = 0;                  /* number of events: initialize to 0 */
static int nprop = 0;                    /* number of hoped events: initialize to 0 */
static int *EventSet;                    /* list of events to be counted by PAPI */
static long_long **papicounters;         /* counters return from PAPI */
static char papiname[PAPI_MAX_STR_LEN];  /* returned from PAPI_event_code_to_name */
static const int BADCOUNT = -999999;     /* Set counters to this when they are bad */
static int GPTLoverheadindx = -1;        /* index into counters
array */
static long_long *lastoverhead;          /* needed because aux not available for overhead */

/* Function prototypes */

static int create_and_start_events (const int);

/*
** GPTL_PAPIsetoption: enable or disable PAPI event defined by "counter". Called
**   from GPTLsetoption.  Since all events are off by default, val=false degenerates
**   to a no-op.  Coded this way to be consistent with the rest of GPTL
**
** Input args:
**   counter: PAPI counter
**   val:     true or false for enable or disable
**
** Return value: 0 (success) or GPTLerror (failure)
*/

int GPTL_PAPIsetoption (const int counter,  /* PAPI counter */
			const int val)      /* true or false for enable or disable */
{
  int n;   /* loop index */

  /* Just return if the flag says disable an option, because default is off */

  if ( ! val)
    return 0;

  /*
  ** Loop through table looking for counter. If found, add the entry to the
  ** list of "proposed events". Won't know till init time whether the event
  ** is available on this arch.
  */

  for (n = 0; n < nentries; n++)
    if (counter == papitable[n].counter) {
      if (nprop+1 > MAX_AUX) {
	return GPTLerror ("GPTL_PAPIsetoption: Event %s is too many\n", papitable[n].str);
      } else {
	propeventlist[nprop].counter = counter;
	propeventlist[nprop].prstr   = papitable[n].prstr;
	propeventlist[nprop].str     = papitable[n].str;
	printf ("GPTL_PAPIsetoption: will attempt to enable event %s\n", propeventlist[nprop].str);
	++nprop;
      }
      return 0;
    }
  /* Counter not found in the mapping table */
  return GPTLerror ("GPTL_PAPIsetoption: counter %d does not exist\n", counter);
}

/*
** GPTL_PAPIinitialize(): Initialize the PAPI interface. Called from GPTLinitialize.
**   PAPI_library_init must be called before any other PAPI routines.
**   PAPI_thread_init is called subsequently if threading is enabled.
**   Finally, allocate space for PAPI counters and start them.
**
** Input args:
**   maxthreads: number of threads
**
** Return value: 0 (success) or GPTLerror or -1 (failure)
*/

int GPTL_PAPIinitialize (const int maxthreads)  /* number of threads */
{
  int ret;       /* return code */
  int n;         /* loop index */
  int counter;   /* PAPI counter */
  int t;         /* thread index */
  int *rc;       /* array of return codes from create_and_start_events */
  bool badret;   /* true if any bad return codes were found */

  /* PAPI_library_init needs to be called before ANY other PAPI routine */

  if ((ret = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
    return GPTLerror ("GPTL_PAPIinitialize: PAPI_library_init failure:%s %d\n", PAPI_strerror (ret), ret);

  /* PAPI_thread_init needs to be called if threading enabled */

#if ( defined THREADED_OMP )
  if (PAPI_thread_init ((unsigned long (*)(void)) (omp_get_thread_num)) != PAPI_OK)
    return GPTLerror ("GPTL_PAPIinitialize: PAPI_thread_init failure\n");
#elif ( defined THREADED_PTHREADS )
  if (PAPI_thread_init ((unsigned long (*)(void)) (pthread_self)) != PAPI_OK)
    return GPTLerror ("GPTL_PAPIinitialize: PAPI_thread_init failure\n");
#endif

  /* allocate and initialize static local space (one slot per thread) */

  EventSet = (int *) GPTLallocate (maxthreads * sizeof (int));
  papicounters = (long_long **) GPTLallocate (maxthreads * sizeof (long_long *));
  lastoverhead = (long_long *) GPTLallocate (maxthreads * sizeof (long_long));

  for (t = 0; t < maxthreads; t++) {
    EventSet[t] = PAPI_NULL;
    papicounters[t] = (long_long *) GPTLallocate (MAX_AUX * sizeof (long_long));
    lastoverhead[t] = -1;   /* -1 marks "never set" (checked by the print routines) */
  }

  /*
  ** Loop over events set by earlier calls to GPTL_PAPIsetoption. For the
  ** events which can be counted on this architecture, fill in the values
  ** (array "eventlist")
  */

  for (n = 0; n < nprop; n++) {
    counter = propeventlist[n].counter;
    if (PAPI_query_event (counter) != PAPI_OK) {
      (void) PAPI_event_code_to_name (counter, papiname);
      printf ("GPTL_PAPIinitialize: event %s not available on this arch\n", papiname);
    } else {
      if (nevents+1 > MAX_AUX) {
	(void) PAPI_event_code_to_name (counter, papiname);
	printf ("GPTL_PAPIinitialize: Event %s is too many\n", papiname);
      } else {
	/* Remember where total cycles lands: needed for overhead estimation */
	if (counter == PAPI_TOT_CYC)
	  GPTLoverheadindx = nevents;
	eventlist[nevents].counter = counter;
	eventlist[nevents].prstr   = propeventlist[n].prstr;
	eventlist[nevents].str     = propeventlist[n].str;
	printf ("GPTL_PAPIinitialize: event %s enabled\n", eventlist[nevents].str);
	++nevents;
      }
    }
  }

  /* Event starting apparently must be within a threaded loop. */

  if (nevents > 0) {
    rc = (int *) GPTLallocate (maxthreads * sizeof (int));

#pragma omp parallel for private (t)

    for (t = 0; t < maxthreads; t++)
      rc[t] = create_and_start_events (t);

    badret = false;
    for (t = 0; t < maxthreads; t++)
      if (rc[t] < 0)
	badret = true;
    free (rc);

    if (badret)
      return -1;
  }
  return 0;
}

/*
** create_and_start_events: Create and start the PAPI eventset. File-local.
**   Threaded routine to create the "event set" (PAPI terminology) and start
**   the counters.
This is only done once, and is called from GPTL_PAPIinitialize
**
** Input args:
**   t: thread number
**
** Return value: 0 (success) or GPTLerror (failure)
*/

static int create_and_start_events (const int t)  /* thread number */
{
  int ret;   /* return code from PAPI lib calls */
  int n;     /* loop index */

  /* Create the event set */

  if ((ret = PAPI_create_eventset (&EventSet[t])) != PAPI_OK)
    return GPTLerror ("GPTL_PAPIstart: failure creating eventset: %s\n", PAPI_strerror (ret));

  /* Add requested events to the event set */

  for (n = 0; n < nevents; n++) {
    if ((ret = PAPI_add_event (EventSet[t], eventlist[n].counter)) != PAPI_OK) {
      printf ("%s\n", PAPI_strerror (ret));
      return GPTLerror ("GPTL_PAPIstart: failure adding event: %s\n", eventlist[n].str);
    }
  }

  /* Start the event set. It will only be read from now on--never stopped */

  if ((ret = PAPI_start (EventSet[t])) != PAPI_OK)
    return GPTLerror ("%s\n", PAPI_strerror (ret));

  return 0;
}

/*
** GPTL_PAPIstart: Start the PAPI counters (actually they are just read).
**   Called from GPTLstart.
**
** Input args:
**   t: thread number
**
** Output args:
**   aux: struct containing the counters
**
** Return value: 0 (success) or GPTLerror (failure)
*/

int GPTL_PAPIstart (const int t,       /* thread number */
		    Papistats *aux)    /* struct containing PAPI stats */
{
  int ret;  /* return code from PAPI lib calls */
  int n;    /* loop index */

  /* If no events are to be counted just return */

  if (nevents == 0)
    return 0;

  /* Read the counters */

  if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK)
    return GPTLerror ("GPTL_PAPIstart: %s\n", PAPI_strerror (ret));

  /*
  ** Store the counter values. When GPTL_PAPIstop is called, the counters
  ** will again be read, and differenced with the values saved here.
  */

  for (n = 0; n < nevents; n++)
    aux->last[n] = papicounters[t][n];

  return 0;
}

/*
** GPTL_PAPIstop: Stop the PAPI counters (actually they are just read).
**   Called from GPTLstop.
**
** Input args:
**   t: thread number
**
** Input/output args:
**   aux: struct containing the counters
**
** Return value: 0 (success) or GPTLerror (failure)
*/

int GPTL_PAPIstop (const int t,        /* thread number */
		   Papistats *aux)     /* struct containing PAPI stats */
{
  int ret;          /* return code from PAPI lib calls */
  int n;            /* loop index */
  long_long delta;  /* change in counters from previous read */

  /* If no events are to be counted just return */

  if (nevents == 0)
    return 0;

  /* Read the counters */

  if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK)
    return GPTLerror ("GPTL_PAPIstop: %s\n", PAPI_strerror (ret));

  /*
  ** Accumulate the difference since timer start in aux.
  ** If negative accumulation has occurred (unfortunately this can and does
  ** happen, especially on AIX), store a flag value (BADCOUNT)
  */

  for (n = 0; n < nevents; n++) {
    delta = papicounters[t][n] - aux->last[n];
    if (delta < 0)
      aux->accum[n] = BADCOUNT;
    else if (aux->accum[n] != BADCOUNT)   /* once bad, stays bad */
      aux->accum[n] += delta;
  }
  return 0;
}

/*
** GPTL_PAPIoverheadstart: Read the PAPI counters for overhead calcs (only
**   possible if total cycles are being counted). Called from GPTLstart and GPTLstop.
**   Have to set the static variable lastoverhead because a pointer to the correct
**   aux timer is not yet available.
**
** Input args:
**   t: thread number
**
** Return value: 0 (success) or GPTLerror (failure)
*/

int GPTL_PAPIoverheadstart (const int t)  /* thread number */
{
  int ret;  /* return code from PAPI lib routine */

  /* If the overhead index hasn't been set we can't do anything */

  if (GPTLoverheadindx < 0)
    return -1;

  if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK)
    return GPTLerror ("GPTL_PAPIoverheadstart: %s\n", PAPI_strerror (ret));

  /* Stash the cycle count so GPTL_PAPIoverheadstop can difference against it */
  lastoverhead[t] = papicounters[t][GPTLoverheadindx];

  return 0;
}

/*
** GPTL_PAPIoverheadstop: Read the PAPI counters and record overhead (only
**   possible if total cycles are being counted). Called from GPTLstart and GPTLstop.
** ** Input args: ** t: thread number ** ** Input/output args: ** aux: struct containing the overhead accumulator ** ** Return value: 0 (success) or GPTLerror (failure) */ int GPTL_PAPIoverheadstop (const int t, /* thread number */ Papistats *aux) /* struct containing PAPI stats */ { int ret; /* return code from PAPI_read */ long_long diff; /* difference in cycle count from when started */ /* overheadindx <= 0 means cycle counting was not enabled */ if (GPTLoverheadindx < 0) return -1; if ((ret = PAPI_read (EventSet[t], papicounters[t])) != PAPI_OK) return GPTLerror ("GPTL_PAPIoverheadstart: %s\n", PAPI_strerror (ret)); /* Accumulate the overhead cycles. Check for a negative increment */ diff = papicounters[t][GPTLoverheadindx] - lastoverhead[t]; if (diff < 0) aux->accum_cycles = BADCOUNT; else aux->accum_cycles += diff; return 0; } /* ** GPTL_PAPIprstr: Print the descriptive string for all enabled PAPI events. ** Called from GPTLpr. ** ** Input args: ** fp: file descriptor */ void GPTL_PAPIprstr (FILE *fp) /* file descriptor */ { int n; for (n = 0; n < nevents; n++) fprintf (fp, "%16s ", eventlist[n].prstr); if (lastoverhead[0] > -1) fprintf (fp, "Overhead (cycles)"); } /* ** GPTL_PAPIpr: Print PAPI counter values for all enabled events. Called from ** GPTLpr. ** ** Input args: ** fp: file descriptor ** aux: struct containing the counters */ void GPTL_PAPIpr (FILE *fp, /* file descriptor to write to */ const Papistats *aux) /* stats to write */ { int n; for (n = 0; n < nevents; n++) { if (aux->accum[n] < 1000000) fprintf (fp, "%16ld ", (long) aux->accum[n]); else fprintf (fp, "%16.10e ", (double) aux->accum[n]); } /* The check on lastoverhead > -1 determines whether it was ever set */ if (lastoverhead[0] > -1) if (aux->accum_cycles < 1000000) fprintf (fp, "%16ld ", (long) aux->accum_cycles); else fprintf (fp, "%16.10e ", (double) aux->accum_cycles); } /* ** GPTLPAPIprinttable: Print table of PAPI native counters. 
Not all are ** necessarily available on this architecture. This is the one routine ** in this file which is user-visible. No underscores in GPTLPAPIprinttable ** to avoid underscore weirdness of g77 */ void GPTLPAPIprinttable () { int n; for (n = 0; n < nentries; n++) printf ("%d %s\n", papitable[n].counter, papitable[n].str); } /* ** GPTL_PAPIadd: Accumulate PAPI counters. Called from add. ** ** Input/Output args: ** auxout: auxout = auxout + auxin ** ** Input args: ** auxin: counters to be summed into auxout */ void GPTL_PAPIadd (Papistats *auxout, /* output struct */ const Papistats *auxin) /* input struct */ { int n; for (n = 0; n < nevents; n++) if (auxin->accum[n] == BADCOUNT || auxout->accum[n] == BADCOUNT) auxout->accum[n] = BADCOUNT; else auxout->accum[n] += auxin->accum[n]; /* Overhead calcs */ if (auxin->accum_cycles == BADCOUNT || auxout->accum_cycles == BADCOUNT) auxout->accum_cycles = BADCOUNT; else auxout->accum_cycles += auxin->accum_cycles; } /* ** GPTL_PAPIfinalize: finalization routine must be called from single-threaded ** region. Free all malloc'd space */ void GPTL_PAPIfinalize (int maxthreads) { int t; for (t = 0; t < maxthreads; t++) free (papicounters[t]); free (EventSet); free (papicounters); free (lastoverhead); } #endif
resample_utils.h
/* Copyright 2020 MONAI Consortium
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

// resample_utils.h: shared GPU/CPU helpers for the resampling kernels —
// portability macros, an atomic-add shim, and the boundary/interpolation enums.

#pragma once

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// We need to define AT_PARALLEL_OPENMP (even if -fopenmp is
// not used) so that at::parallel_for is defined somewhere.
// This must be done before <ATen/Parallel.h> is included.
//
// Note that if AT_PARALLEL_OPENMP = 1 but compilation does not use
// -fopenmp, omp pragmas will be ignored. In that case, the code will
// be effectively sequential, and we don't have to worry about
// operations being atomic.
#if !(AT_PARALLEL_OPENMP)
#if !(AT_PARALLEL_NATIVE)
#if !(AT_PARALLEL_NATIVE_TBB)
#error No parallel backend specified
#endif
#endif
#endif
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// These are defines that help writing generic code for both GPU and CPU
#ifdef __CUDACC__

#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <THC/THCAtomics.cuh>

#define MONAI_INLINE __forceinline__
#define MONAI_DEVICE __device__
#define MONAI_HOST __host__
#define MONAI_ATOMIC_ADD monai::gpuAtomicAdd
#define MONAI_NAMESPACE_DEVICE namespace cuda

namespace monai {

// atomicAdd API changed between pytorch 1.4 and 1.5.
// This shim selects the right global function at compile time so callers
// can always write gpuAtomicAdd(ptr, offset, value).
template <typename scalar_t, typename offset_t>
static __forceinline__ __device__ void gpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) {
#if MONAI_TORCH_VERSION >= 10500
  ::gpuAtomicAdd(ptr + offset, value);
#else
  ::atomicAdd(ptr + offset, value);
#endif
}

} // namespace monai

#else

#define MONAI_INLINE inline
#define MONAI_DEVICE
#define MONAI_HOST
#define MONAI_ATOMIC_ADD monai::cpuAtomicAdd
#define MONAI_NAMESPACE_DEVICE namespace cpu

namespace monai {

// CPU counterpart of gpuAtomicAdd. The "#pragma omp atomic" only has an
// effect when the OpenMP backend is compiled in; otherwise (see the note
// above) execution is effectively sequential and the plain += is safe.
template <typename scalar_t, typename offset_t>
static inline void cpuAtomicAdd(scalar_t* ptr, offset_t offset, scalar_t value) {
#if AT_PARALLEL_OPENMP
#pragma omp atomic
#endif
  ptr[offset] += value;
}

} // namespace monai

#endif
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#include <ATen/ATen.h>

namespace monai {

// How out-of-field-of-view coordinates are handled.
enum class BoundType : int64_t {
  Replicate, // Replicate last inbound value = clip coordinates
  DCT1, // Symmetric w.r.t. center of the last inbound voxel
  DCT2, // Symmetric w.r.t. edge of the last inbound voxel (=Neuman)
  DST1, // Asymmetric w.r.t. center of the last inbound voxel
  DST2, // Asymmetric w.r.t. edge of the last inbound voxel (=Dirichlet)
  DFT, // Circular / Wrap around the FOV
  Sliding, // For deformation-fields only: mixture of DCT2 and DST2
  Zero, // Zero outside of the FOV
  NoCheck // /!\ Checks disabled: assume coordinates are inbound
};

using BoundVectorRef = c10::ArrayRef<BoundType>;

// Spline interpolation order (Nearest = 0th ... SeventhOrder = 7th).
enum class InterpolationType : int64_t {
  Nearest,
  Linear,
  Quadratic,
  Cubic,
  FourthOrder,
  FifthOrder,
  SixthOrder,
  SeventhOrder
};

using InterpolationVectorRef = c10::ArrayRef<InterpolationType>;

// Human-readable printing of a BoundType (host-side debugging/log output).
static MONAI_INLINE MONAI_HOST std::ostream& operator<<(std::ostream& os, const BoundType& bound) {
  switch (bound) {
    case BoundType::Replicate:
      return os << "Replicate";
    case BoundType::DCT1:
      return os << "DCT1";
    case BoundType::DCT2:
      return os << "DCT2";
    case BoundType::DST1:
      return os << "DST1";
    case BoundType::DST2:
      return os << "DST2";
    case BoundType::DFT:
      return os << "DFT";
    case BoundType::Zero:
      return os << "Zero";
    case BoundType::Sliding:
      return os << "Sliding";
    case BoundType::NoCheck:
      return os << "NoCheck";
  }
  // Unreachable for valid enumerators; guards against casts from raw ints.
  return os << "Unknown bound";
}

// Human-readable printing of an InterpolationType.
static MONAI_INLINE MONAI_HOST std::ostream& operator<<(std::ostream& os, const InterpolationType& itp) {
  switch (itp) {
    case InterpolationType::Nearest:
      return os << "Nearest";
    case InterpolationType::Linear:
      return os << "Linear";
    case InterpolationType::Quadratic:
      return os << "Quadratic";
    case InterpolationType::Cubic:
      return os << "Cubic";
    case InterpolationType::FourthOrder:
      return os << "FourthOrder";
    case InterpolationType::FifthOrder:
      return os << "FifthOrder";
    case InterpolationType::SixthOrder:
      return os << "SixthOrder";
    case InterpolationType::SeventhOrder:
      return os << "SeventhOrder";
  }
  return os << "Unknown interpolation order";
}

} // namespace monai
axpy.c
#include "omp.h"

/*
 * axpy: Y := a*X + Y over N elements, offloaded to the default target device.
 *
 * N: element count (N == 0 is a no-op)
 * Y: in/out vector of length N (mapped to and from the device)
 * X: input vector of length N (mapped to the device only)
 * a: scalar multiplier
 */
void axpy(int N, float *Y, float *X, float a) {
  /* Combined construct: a single target region whose loop is worksharing-
     parallel; equivalent to a target directive followed by parallel for. */
#pragma omp target parallel for map(to: X[0:N]) map(tofrom: Y[0:N])
  for (int idx = 0; idx < N; ++idx) {
    Y[idx] = Y[idx] + a * X[idx];
  }
}
cilk_rpy_ewald_polyd.c
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "timer.h"
#include <sys/time.h>
#include <cilk/cilk.h>

#define M_PI 3.14159265358979323846
#define NTHREADS 240

/*
 * scalar_rpy_ewald_real: real-space kernel of the RPY Ewald sum for one
 * pair separation r and splitting parameter xi. Writes the two scalar
 * mobility coefficients:
 *   *m11 - coefficient of the identity part
 *   *m12 - coefficient of the (rhat rhat^T) dyadic part
 * a3 is the pair's radius-dependent term (callers pass 0.5*(rad_i^2+rad_j^2));
 * bead radius a is fixed at 1 here.
 */
inline
void scalar_rpy_ewald_real(double r, double xi, double a3, double *m11, double *m12)
{
    double a = 1.;
    double xi2 = xi*xi;
    double xi3 = xi2*xi;
    double xi5 = xi3*xi2;
    double xi7 = xi5*xi2;
    double r2 = r*r;
    double r4 = r2*r2;
    double ri = 1./r;
    double ri2 = ri*ri;
    double ri3 = ri*ri2;
    /* complementary error function screens the long-range 1/r tail */
    double erfc_xi_r = erfc(xi*r);
    double pi_exp = 1./sqrt(M_PI) * exp(-xi2*r2);

    *m11 = (0.75*a*ri + 0.5*a3*ri3)*erfc_xi_r
         + ( 4*xi7*a3*r4 + 3*xi3*a*r2 - 20*xi5*a3*r2 - 4.5*xi*a + 14*xi3*a3 + xi*a3*ri2)*pi_exp;
    *m12 = (0.75*a*ri - 1.5*a3*ri3)*erfc_xi_r
         + (-4*xi7*a3*r4 - 3*xi3*a*r2 + 16*xi5*a3*r2 + 1.5*xi*a - 2*xi3*a3 - 3*xi*a3*ri2)*pi_exp;
}

/*
 * scalar_rpy_ewald_recip: reciprocal-space kernel for one wave number k.
 * Writes the scalar weight *m2 applied to each k-vector's contribution.
 * (a and a3 are set but unused in this expression.)
 */
inline
void scalar_rpy_ewald_recip(double k, double xi, double *m2)
{
    double a = 1.;
    double a3 = 1.;
    double k2 = k*k;
    double xii2k2 = k2/(xi*xi);
    *m2 = (1. + 0.25*xii2k2 + 0.125*xii2k2*xii2k2) * 6.*M_PI/k2 * exp(-0.25*xii2k2);
}

/*
 * rpy_ewald: assemble the 3np x 3np RPY mobility matrix by Ewald summation.
 *
 *   np  - number of particles
 *   a   - output matrix, row-major with leading dimension 3*np; only the
 *         lower-triangular 3x3 blocks are filled (see "Use matlab to add
 *         transpose" note below)
 *   pos - particle positions, 3 doubles per particle
 *   L   - cubic box edge length
 *   rad - per-particle radii
 *   xi  - Ewald splitting parameter
 *   nr  - real-space image cutoff (images in [-nr, nr] per axis)
 *   nk  - reciprocal-space cutoff (wave indices in [-nk, nk] per axis)
 *
 * Returns 0. note: positions must be wrapped inside the box [0,L]
 */
int rpy_ewald(int np, double * restrict a, const double * restrict pos, double L,
              const double * restrict rad, double xi, int nr, int nk)
{
    __declspec(align(64)) double rvec[8];
    __declspec(align(64)) double rvec0[8];
    __declspec(align(64)) double temp[8];
    // double temp_0, temp_1, temp_2, temp_3, temp_4, temp_5;
    double a3;
    double m11, m12, m2;
    double eye3_coef;
    double r2, r;
    int x, y, z;
    int i, j;
    double *ap0, *ap;
    /* half the (2nk+1)^3 grid minus the k=0 point, exploiting inversion symmetry */
    int vsize = ((2*nk+1)*(2*nk+1)*(2*nk+1) - 1) / 2;
    /* VSIZE is the compile-time worst case (nk == 6) used to size the tables;
       assumes nk <= 6 — TODO confirm against callers */
#define VSIZE ((2*6+1)*(2*6+1)*(2*6+1) - 1) / 2
    // int A_VSIZE = ceil(VSIZE/8.0)*8;
    // int K_VSIZE = ceil(3*VSIZE/8.0)*8;
    // printf("check vsize=%d\n", A_VSIZE);
    __declspec(align(64)) double k_array[VSIZE];//1104
    __declspec(align(64)) double m2_array[VSIZE];//1104
    __declspec(align(64)) double kvec_array[3*VSIZE];//3296
    int ind;
    __declspec(align(64)) double kvec[8];
    double k;
    double t;
    double vinv = 1./(L*L*L);   /* 1/volume prefactor for the k-space sum */
    double time0, time1;
    double time0_real, time1_real;
    double time0_recip, time1_recip;

    // INDICES for converting for loops
    int _b, _index, ib, ib2;

    // *************************************************************************
    // compute and save coefficients for reciprocal-space sum
    // Due to symmetry, only need half of the grid points
    ind = 0;
    _b = (2*nk+1);
    for (_index =0 ;_index < (_b*_b*_b -1)/2; _index++){// Using indices x,y,z are recalculated
        /* decode the flat grid index into (x,y,z) wave indices in [-nk, nk] */
        z = _index%(_b)-nk;// adjusting the indices
        x = (_index-_index%(_b*_b))/(_b*_b)-nk;
        y = (_index%(_b*_b)-_index%(_b))/_b-nk;
        k_array[ind] = 2.*M_PI/L*sqrt((double)(x*x + y*y + z*z));
        scalar_rpy_ewald_recip(k_array[ind], xi, &m2_array[ind]);
        kvec_array[3*ind ] = 2.*M_PI/L*x;
        kvec_array[3*ind+1] = 2.*M_PI/L*y;
        kvec_array[3*ind+2] = 2.*M_PI/L*z;
        ind++;
    }

    // *************************************************************************
    // real-space sum over the strict lower triangle of particle pairs;
    // the flat pair index _index1 is decoded into (i,j) in closed form below.
    // #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, ap, ap0, _b, temp, eye3_coef, _index, rvec0, rvec, x, y, z, r, r2, m11, m12, a3 )
    cilk_for (int _index1 = np*(np-1)/2-1; _index1>=0; _index1--){
        /* per-iteration locals shadow the function-scope scratch variables
           so parallel iterations do not share state */
        int i, j, _b, _index, x, y, z;
        double *ap, *ap0, eye3_coef, r, r2, m11, m12, a3;
        __declspec(align(64)) double rvec[8];
        __declspec(align(64)) double rvec0[8];
        __declspec(align(64)) double temp[8];
        /* triangular-number inversion: flat pair index -> (i,j), i > j */
        i = np-1-(int)((1+sqrt(8*_index1+1))/2);
        j = np-1-_index1 + (int)((1+sqrt(8*_index1+1))/2)*((int)((1+sqrt(8*_index1+1))/2)-1)/2;
        /* temp[0..5] accumulate the unique entries of a symmetric 3x3 block:
           [0]=xx [1]=xy [2]=xz [3]=yy [4]=yz [5]=zz */
        temp[0] = 0.;
        temp[1] = 0.;
        temp[3] = 0.;
        temp[2] = 0.;
        temp[4] = 0.;
        temp[5] = 0.;
        eye3_coef = 0.;
        rvec0[0] = pos[3*i] - pos[3*j];
        rvec0[1] = pos[3*i+1] - pos[3*j+1];
        rvec0[2] = pos[3*i+2] - pos[3*j+2];
        a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
        _b = (2*nr+1);
        //shared(eye3_coef, temp, rvec0, L, xi, a3, m11, m12, _b, xi3, xi5, xi7, xi)
        // #pragma omp parallel for schedule(static) private(rvec, x, y, z, r, r2, m11, m12) shared(eye3_coef, temp, rvec0, a3)
        /* sum over all periodic images (x,y,z) in [-nr, nr]^3 */
        for (_index =0 ;_index < _b*_b*_b; _index++){
            z =_index%(_b)-nr;// adjusting the indices
            x = (_index-_index%(_b*_b))/(_b*_b)-nr;
            y = (_index%(_b*_b)-_index%(_b))/_b-nr;
            rvec[0] = rvec0[0] + x*L;
            rvec[1] = rvec0[1] + y*L;
            rvec[2] = rvec0[2] + z*L;
            // compute norm
            r2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
            r = sqrt(r2);
            /* normalize to the unit direction before forming the dyadic */
            rvec[0] /= r;
            rvec[1] /= r;
            rvec[2] /= r;
            scalar_rpy_ewald_real(r, xi, a3, &m11, &m12);
            eye3_coef += m11;
            temp[0] += m12 * rvec[0] * rvec[0];
            temp[1] += m12 * rvec[0] * rvec[1];
            temp[2] += m12 * rvec[0] * rvec[2];
            temp[3] += m12 * rvec[1] * rvec[1];
            temp[4] += m12 * rvec[1] * rvec[2];
            temp[5] += m12 * rvec[2] * rvec[2];
        }

        // add contribution to eye3 term
        temp[0] += eye3_coef;
        temp[3] += eye3_coef;
        temp[5] += eye3_coef;

        // sum into global matrix (only lower-triangular part)
        // Use matlab to add transpose
        ap0 = &a[np*3*3*i + 3*j];
        ap = ap0;
        *ap++ = temp[0];
        *ap++ = temp[1];
        *ap = temp[2];
        ap = ap0+np*3;
        *ap++ = temp[1];
        *ap++ = temp[3];
        *ap = temp[4];
        ap = ap0+np*3+np*3;
        *ap++ = temp[2];
        *ap++ = temp[4];
        *ap = temp[5];
    }

    // *************************************************************************
    // reciprocal-space sum — runs over the inclusive lower triangle (i >= j),
    // so diagonal (i == j) blocks are included here, unlike the real-space sum.
    // #pragma omp parallel for schedule(static) num_threads(NTHREADS) private(i, j, temp, ap, ap0, ind, rvec, kvec, k, m2, t, a3)
    cilk_for (_index = np*(np+1)/2-1; _index>=0; _index--){
        int i, j, ind;
        double *ap, *ap0, k, m2, t, a3;
        __declspec(align(64)) double temp[8];
        __declspec(align(64)) double rvec[8];
        __declspec(align(64)) double kvec[8];
        /* triangular-number inversion including the diagonal */
        i = np-1-(int)((-1+sqrt(8*_index+1))/2);
        j = np-1-_index + (int)((-1+sqrt(8*_index+1))/2)*((int)((-1+sqrt(8*_index+1))/2)+1)/2;
        rvec[0] = pos[3*i+0] - pos[3*j];
        rvec[1] = pos[3*i+1] - pos[3*j+1];
        rvec[2] = pos[3*i+2] - pos[3*j+2];
        temp[0] = 0.;
        temp[1] = 0.;
        temp[3] = 0.;
        temp[2] = 0.;
        temp[4] = 0.;
        temp[5] = 0.;
        a3 = 0.5*(rad[i]*rad[i] + rad[j]*rad[j]);
        for (ind=0; ind<vsize; ind++)
        {
            k = k_array[ind];
            m2 = m2_array[ind];
            kvec[0] = kvec_array[3*ind ];
            kvec[1] = kvec_array[3*ind+1];
            kvec[2] = kvec_array[3*ind+2];
            /* factor 2 accounts for the omitted -k half of the grid */
            t = 2.*vinv*m2*cos(kvec[0]*rvec[0] + kvec[1]*rvec[1] + kvec[2]*rvec[2])*(1.-a3*k*k/3.);
            /* normalize k-vector, then accumulate t*(I - khat khat^T) */
            kvec[0] /= k;
            kvec[1] /= k;
            kvec[2] /= k;
            temp[0] += t * (1. - kvec[0]*kvec[0]);
            temp[1] += t * - kvec[0]*kvec[1];
            temp[2] += t * - kvec[0]*kvec[2];
            temp[3] += t * (1. - kvec[1]*kvec[1]);
            temp[4] += t * - kvec[1]*kvec[2];
            temp[5] += t * (1. - kvec[2]*kvec[2]);
        }

        // sum into matrix
        // sum with existing values
        ap0 = &a[np*3*3*i + 3*j];
        ap = ap0;
        *ap++ += temp[0];
        *ap++ += temp[1];
        *ap += temp[2];
        ap = ap0+np*3;
        *ap++ += temp[1];
        *ap++ += temp[3];// diagonal element
        *ap += temp[4];
        ap = ap0+np*3+np*3;
        *ap++ += temp[2];
        *ap++ += temp[4];
        *ap += temp[5];// diagonal element
    }

    // *************************************************************************
    // self-part
    for (i=0; i<np; i++)// adding some term to diagonal
    {
        t = 1./rad[i] - (6. - 40./3.*xi*xi*rad[i]*rad[i])*xi/sqrt(M_PI);
        t *= 0.5;
        for (j=0; j<3; j++)
        {
            /* diagonal entries were double-counted by the k-space (i==j)
               pass above; halve them before adding the self mobility */
            ind = 3*i+j;
            a[ind*np*3+ind] = a[ind*np*3+ind]*0.5+t;// taking care of (i==j) condition
        }
    }

    return 0;
}
VolumetricConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricConvolutionMM.c"
#else

/*
 * Validate tensor shapes/sizes for the 3D (volumetric) convolution
 * implemented via matrix multiplication. gradOutput and bias may be NULL
 * (they are only checked when present). Raises via THError/THArgCheck.
 */
static void inline THNN_(VolumetricConvolutionMM_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THTensor *weight,
                         THTensor *bias,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int pT, int pW, int pH) {
  THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
                "4D or 5D (batch mode) tensor expected for input, but got: %s");
  THArgCheck(kT > 0 && kW > 0 && kH > 0, 8,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW);

  int ndim = input->nDimension;
  int dimf = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  /* in batch mode (5D) dimension 0 is the batch; shift all others by one */
  if (ndim == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  long nInputPlane;
  long inputDepth;
  long inputHeight;
  long inputWidth;
  long nOutputPlane;
  long outputDepth;
  long outputHeight;
  long outputWidth;

  nInputPlane = input->size[dimf];
  inputDepth = input->size[dimt];
  inputHeight  = input->size[dimh];
  inputWidth   = input->size[dimw];
  nOutputPlane = weight->size[0];
  /* standard convolution output-size formula, per spatial dimension */
  outputDepth  = (inputDepth + 2*pT - kT) / dT + 1;
  outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
  outputWidth  = (inputWidth + 2*pW - kW) / dW + 1;

  if (outputWidth < 1 || outputHeight < 1 || outputDepth < 1)
  {
    THError(
      "Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small",
      nInputPlane, inputDepth, inputHeight, inputWidth,
      nOutputPlane, outputDepth, outputHeight, outputWidth
    );
  }

  THArgCheck(weight->nDimension == 2 || weight->nDimension == 5, 4,
             "weight tensor should be 2D or 5D - got %d", weight->nDimension);

  if (bias != NULL) {
    THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
  }

  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

/*
 * If *_weight is 5D (nOutputPlane x nInputPlane x kT x kH x kW), replace it
 * with a 2D view (nOutputPlane x rest) sharing the same storage.
 * Returns 1 when a new view was allocated (caller must free it), 0 otherwise.
 */
static int THNN_(view_weight)(THTensor **_weight)
{
  THTensor *weight = *_weight;
  if (weight->nDimension == 5) {
    long s1 = weight->size[0];
    long s2 = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4];
    *_weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset, s1, -1, s2, -1);
    return 1;
  }
  return 0;
}

/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */
/*
 * unfolded_acc_vol: inverse of unfolded_copy_vol — accumulate (+=) the
 * unfolded columns in finput back into the overlapping positions of input.
 * Used to fold col2vol during the gradInput pass. Out-of-bounds (padding)
 * positions are simply skipped.
 */
static void THNN_(unfolded_acc_vol)(
          THTensor *finput,
          THTensor *input,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int nInputPlane,
          int inputDepth, int inputWidth, int inputHeight,
          int outputDepth, int outputWidth, int outputHeight)
{
  int nip;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);

//#pragma omp parallel for private(nip)
  for (nip = 0; nip < nInputPlane; nip++)
  {
    int kt, kw, kh, t, y, x, it, ix, iy;
    for (kt = 0; kt < kT; kt++)
    {
      for (kh = 0; kh < kH; kh++)
      {
        for (kw = 0; kw < kW; kw++)
        {
          /* src: one (kt,kh,kw) slice of the unfolded buffer;
             dst: the corresponding input plane */
          real *src = finput_data
            + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
            + kt  * (kH*kW*outputDepth*outputHeight*outputWidth)
            + kh  * (kW*outputDepth*outputHeight*outputWidth)
            + kw  * (outputDepth*outputHeight*outputWidth);
          real *dst = input_data + nip*(inputDepth*inputHeight*inputWidth);
          if (pT > 0 || pH > 0 || pW > 0)
          {
            for (t = 0; t < outputDepth; t++)
            {
              it = t*dT - pT + kt;
              for (y = 0; y < outputHeight; y++)
              {
                iy = y*dH - pH + kh;
                for (x = 0; x < outputWidth; x++)
                {
                  ix = x*dW - pW + kw;
                  if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
                  {
                    /* padding region: nothing to accumulate */
                  }
                  else
                  {
                    real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
                    THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
                  }
                }
              }
            }
          }
          else
          {
            /* no padding: every position maps inside the input */
            for (t = 0; t < outputDepth; t++)
            {
              it = t*dT + kt;
              for (y = 0; y < outputHeight; y++)
              {
                iy = y*dH + kh;
                for(x = 0; x < outputWidth; x++)
                {
                  ix = x*dW + kw;
                  real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix;
                  THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1);
                }
              }
            }
          }
        }
      }
    }
  }
}

/*
 * unfolded_copy_vol: vol2col — copy input patches into finput so that the
 * convolution becomes a single matrix multiplication. Padding positions
 * are written as zeros.
 */
static void THNN_(unfolded_copy_vol)(
          THTensor *finput,
          THTensor *input,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int nInputPlane,
          int inputDepth, int inputWidth, int inputHeight,
          int outputDepth, int outputWidth, int outputHeight)
{
  long k;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);
// #pragma omp parallel for private(k)
  for (k = 0; k < nInputPlane*kT*kH*kW; k++)
  {
    /* decode flat index k into (plane, kt, kh, kw) */
    int nip = k / (kT*kH*kW);
    int rest = k % (kT*kH*kW);
    int kt = rest / (kH*kW);
    rest = rest % (kH*kW);
    int kh = rest / kW;
    int kw = rest % kW;
    int t,x,y,it,ix,iy;
    real *dst = finput_data
      + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth)
      + kt  * (kH*kW*outputDepth*outputHeight*outputWidth)
      + kh  * (kW*outputDepth*outputHeight*outputWidth)
      + kw  * (outputDepth*outputHeight*outputWidth);
    real *src = input_data + nip*(inputDepth*inputHeight*inputWidth);

    if (pT > 0 || pH > 0 || pW > 0)
    {
      for (t = 0; t < outputDepth; t++)
      {
        it = t*dT - pT + kt;
        for (y = 0; y < outputHeight; y++)
        {
          iy = y*dH - pH + kh;
          for (x = 0; x < outputWidth; x++)
          {
            ix = x*dW - pW + kw;
            if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth)
              memset(dst+t*outputHeight*outputWidth+y*outputWidth+x, 0, sizeof(real)*(1));
            else
              memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
          }
        }
      }
    }
    else
    {
      for (t = 0; t < outputDepth; t++)
      {
        it = t*dT + kt;
        for (y = 0; y < outputHeight; y++)
        {
          iy = y*dH + kh;
          for(x = 0; x < outputWidth; x++)
          {
            ix = x*dW + kw;
            memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1));
          }
        }
      }
    }
  }
}

/*
 * Forward pass for one sample: unfold input into finput, seed output with
 * bias (or zeros), then output2d += weight * finput via addmm.
 */
static void THNN_(VolumetricConvolutionMM_updateOutput_frame)(
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          long nInputPlane,
          long inputDepth, long inputWidth, long inputHeight,
          long nOutputPlane,
          long outputDepth, long outputWidth, long outputHeight)
{
  long i;
  THTensor *output2d;

  THNN_(unfolded_copy_vol)(
    finput, input,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    nInputPlane,
    inputDepth, inputWidth, inputHeight,
    outputDepth, outputWidth, outputHeight
  );

  /* 2D view of output: nOutputPlane x (D*H*W), sharing storage */
  output2d = THTensor_(newWithStorage2d)(
    output->storage, output->storageOffset, nOutputPlane, -1,
    outputDepth*outputHeight*outputWidth, -1
  );

  if (bias) {
    /* fill each output plane with its bias value before the matmul */
    for (i = 0; i < nOutputPlane; i++)
    {
      THVector_(fill)(
        output->storage->data+output->storageOffset+output->stride[0]*i,
        THTensor_(get1d)(bias, i),
        outputDepth*outputHeight*outputWidth
      );
    }
  } else {
    THTensor_(zero)(output);
  }

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  THTensor_(free)(output2d);
}

/*
 * Public forward entry point. Handles both 4D (single sample) and
 * 5D (batch) input; batch mode loops updateOutput_frame over dim 0.
 */
void THNN_(VolumetricConvolutionMM_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  int dimf = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  int freeWeight = 0;

  long nInputPlane;
  long inputDepth;
  long inputHeight;
  long inputWidth;
  long nOutputPlane;
  long outputDepth;
  long outputHeight;
  long outputWidth;

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, NULL, weight, bias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH);
  input = THTensor_(newContiguous)(input);

  if (input->nDimension == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  nInputPlane = input->size[dimf];
  inputDepth = input->size[dimt];
  inputHeight  = input->size[dimh];
  inputWidth   = input->size[dimw];
  nOutputPlane = weight->size[0];
  outputDepth  = (inputDepth + 2*pT - kT) / dT + 1;
  outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
  outputWidth  = (inputWidth + 2*pW - kW) / dW + 1;

  freeWeight = THNN_(view_weight)(&weight);

  if (input->nDimension == 4)
  {
    THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth);

    THNN_(VolumetricConvolutionMM_updateOutput_frame)(
      input, output, weight, bias, finput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH,
      nInputPlane, inputDepth, inputWidth, inputHeight,
      nOutputPlane, outputDepth, outputWidth, outputHeight
    );
  }
  else
  {
    long T = input->size[0];
    long t;

    /* per-sample unfold buffers so samples are independent */
    THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth);

// #pragma omp parallel for private(t)
    for (t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(VolumetricConvolutionMM_updateOutput_frame)(
        input_t, output_t, weight, bias, finput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH,
        nInputPlane, inputDepth, inputWidth, inputHeight,
        nOutputPlane, outputDepth, outputWidth, outputHeight
      );

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  if (freeWeight)
    THTensor_(free)(weight);
}

/*
 * Backward-input for one sample: fgradInput = weight^T * gradOutput2d,
 * then fold (accumulate) fgradInput back into gradInput.
 * Note the swapped width/height arguments in the unfolded_acc_vol call
 * below mirror the original upstream code — confirm before changing.
 */
static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
          THTensor *gradInput,
          THTensor *gradOutput,
          THTensor *weight,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    gradOutput->storage, gradOutput->storageOffset,
    gradOutput->size[0], -1,
    gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
  );

  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  THTensor_(free)(gradOutput2d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc_vol)(
    fgradInput, gradInput,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    gradInput->size[0], gradInput->size[1], gradInput->size[3], gradInput->size[2],
    gradOutput->size[1], gradOutput->size[3], gradOutput->size[2]
  );
}

/*
 * Public backward-input entry point (4D or 5D input).
 */
void THNN_(VolumetricConvolutionMM_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *finput,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  int nOutputPlane = (int)weight->size[0];

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, weight, NULL,
        kT, kW, kH, dT, dW, dH, pT, pW, pH);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  int freeWeight = THNN_(view_weight)(&weight);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  // depending on the BLAS library, fgradInput (result tensor) might
  // be left uninitialized on zero alpha, which might lead to weird behavior
  // hence, to be safe, zero it
  THTensor_(zero)(fgradInput);
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 0, 1);

  if (input->nDimension == 4)
  {
    THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
      gradInput, gradOutput, tweight, fgradInput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH
    );
  }
  else
  {
    long T = input->size[0];
    long t;

//#pragma omp parallel for private(t)
    for (t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
        gradInput_t, gradOutput_t, tweight, fgradInput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH
      );

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  if (freeWeight)
    THTensor_(free)(weight);
}

/*
 * Weight/bias gradient for one sample:
 * gradWeight += scale * gradOutput2d * finput^T; gradBias accumulates
 * scaled per-plane row sums of gradOutput2d.
 */
static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          real scale)
{
  long i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    gradOutput->storage, gradOutput->storageOffset,
    gradOutput->size[0], -1,
    gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1
  );

  THTensor *tfinput = THTensor_(new)();
  THTensor_(transpose)(tfinput, finput, 0, 1);
  THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
  THTensor_(free)(tfinput);

  if (gradBias) {
    for (i = 0; i < gradBias->size[0]; i++)
    {
      long k;
      real sum = 0;
      real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
      for (k = 0; k < gradOutput2d->size[1]; k++)
        sum += data[k];
      (gradBias->storage->data + gradBias->storageOffset)[i] += scale * sum;
    }
  }

  THTensor_(free)(gradOutput2d);
}

/*
 * Public parameter-gradient entry point (4D or 5D input); batch mode
 * reuses the per-sample finput slices produced by the forward pass.
 */
void THNN_(VolumetricConvolutionMM_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          accreal scale_)
{
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  int freeWeight;
  int nOutputPlane = (int)gradWeight->size[0];

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, gradWeight, gradBias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  freeWeight = THNN_(view_weight)(&gradWeight);

  if (input->nDimension == 4)   // non-batch mode
  {
    THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
  }
  else  // batch mode
  {
    long T = input->size[0];
    long t;

    for (t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);

      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  if (freeWeight)
    THTensor_(free)(gradWeight);
}

#endif
GB_unaryop__identity_int32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_fp64
// op(A') function:  GB_tran__identity_int32_fp64

// C type:   int32_t
// A type:   double
// cast:     int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the cast input)
#define GB_OP(z, x) \
    z = x ;

// casting (double -> int32_t with saturation handled by GB_CAST_SIGNED)
#define GB_CASTING(z, x) \
    int32_t z ; GB_CAST_SIGNED(z,x,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise over the anz entries of Ax; embarrassingly parallel, split
// statically across nthreads.
GrB_Info GB_unop__identity_int32_fp64
(
    int32_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB_tran__identity_int32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rar_common.c
/* * This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #include "misc.h" // error() static int omp_t = 1; static unsigned char *saved_salt; static unsigned char *saved_key; static int (*cracked); static unpack_data_t (*unpack_data); static unsigned int *saved_len; static unsigned char *aes_key; static unsigned char *aes_iv; /* cRARk use 4-char passwords for CPU benchmark */ static struct fmt_tests cpu_tests[] = { {"$RAR3$*0*b109105f5fe0b899*d4f96690b1a8fe1f120b0290a85a2121", "test"}, {"$RAR3$*0*42ff7e92f24fb2f8*9d8516c8c847f1b941a0feef064aaf0d", "1234"}, {"$RAR3$*0*56ce6de6ddee17fb*4c957e533e00b0e18dfad6accc490ad9", "john"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ 
{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, {"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, 
/* Tail of the CPU self-test vector table (the array begins before this chunk).
   Entry format: {"$RAR3$*type*salt*[crc*packsize*unpsize*inlined*]data[*...]", "password"} */
	{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"},
	{"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"},
	{"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"},
	{"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"},
	{"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"},
	{"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"},
	{"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"},
	{"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"},
	{"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"},
	{"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"},
	{"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"},
	{"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"},
	{"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"},
	{"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"},
	{"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"},
	{"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"},
	{"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"},
	{"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"},
	{"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"},
	{"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"},
	{"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"},
	{"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"},
	{"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"},
#endif
	{NULL}
};

#ifdef RAR_OPENCL_FORMAT
/* cRARk use 5-char passwords for GPU benchmark */
static struct fmt_tests gpu_tests[] = {
	{"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"},
	{"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"},
	{"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"},
	{"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"},
	{"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"},
	/* -p mode tests, -m0 and -m3 (in that order) */
	{"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"},
	{"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"},
#ifdef DEBUG
	{"$RAR3$*0*af24c0c95e9cafc7*e7f207f30dec96a5ad6f917a69d0209e", "magnum"},
	{"$RAR3$*0*2653b9204daa2a8e*39b11a475f486206e2ec6070698d9bbc", "123456"},
	{"$RAR3$*0*63f1649f16c2b687*8a89f6453297bcdb66bd756fa10ddd98", "abc123"},
	/* -p mode tests, -m0 and -m3 (in that order) */
	{"$RAR3$*1*575b083d78672e85*965f1453*48*47*1*cd3d8756438f43ab70e668792e28053f0ad7449af1c66863e3e55332bfa304b2c082b9f23b36cd4a8ebc0b743618c5b2*30", "magnum"},
	{"$RAR3$*1*6f5954680c87535a*965f1453*64*47*1*c9bb398b9a5d54f035fd22be54bc6dc75822f55833f30eb4fb8cc0b8218e41e6d01824e3467475b90b994a5ddb7fe19366d293c9ee305316c2a60c3a7eb3ce5a*33", "magnum"},
	/* Various lengths, these should be in self-test but not benchmark */
	/* from CMIYC 2012 */
	{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"},
	{"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"},
	{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"},
	{"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"},
	{"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"},
	{"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"},
	{"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"},
	{"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"},
	{"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"},
	{"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"},
	{"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"},
	{"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"},
	{"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"},
	{"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"},
	{"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"},
	{"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"},
	{"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"},
	{"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"},
	{"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"},
	{"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"},
#endif
	{NULL}
};
#endif

/*
 * Per-hash "salt" record. Because the encrypted blob may be huge (or mmap'ed
 * from an archive on disk), the comparable portion of the salt starts at the
 * 'salt' member; everything before it is excluded from salt comparison.
 */
typedef struct {
	dyna_salt dsalt;	/* must be first. allows dyna_salt to work */
	/* place all items we are NOT going to use for salt comparison, first */
	unsigned char *blob;	/* points at raw_data, or at an mmap'ed region */
	/* data from this point on, is part of the salt for compare reasons */
	unsigned char salt[8];
	int type;	/* 0 = -hp, 1 = -p */
	/* for rar -p mode only: */
	union {
		unsigned int w;
		unsigned char c[4];
	} crc;
	unsigned long long pack_size;
	unsigned long long unp_size;
	int method;
	unsigned char blob_hash[20]; // holds an sha1, but could be 'any' hash.
	// raw_data should be word aligned, and 'ok'
	unsigned char raw_data[1];	/* flexible tail: inlined ciphertext, if any */
} rarfile;

static rarfile *cur_file;

#undef set_key
/* Store candidate password 'key' at slot 'index' as fixed-width UTF-16LE. */
static void set_key(char *key, int index)
{
	int plen;
	UTF16 buf[PLAINTEXT_LENGTH + 1];

	/* UTF-16LE encode the password, encoding aware */
	plen = enc_to_utf16(buf, PLAINTEXT_LENGTH, (UTF8*) key, strlen(key));
	if (plen < 0)
		plen = strlen16(buf);	/* negative means truncation; use what fits */
	memcpy(&saved_key[UNICODE_LENGTH * index], buf, UNICODE_LENGTH);
	saved_len[index] = plen << 1;	/* length in bytes, not UTF-16 units */
#ifdef RAR_OPENCL_FORMAT
	new_keys = 1;
#endif
}

/*
 * Parse a "$RAR3$*..." ciphertext into a heap-allocated rarfile record and
 * return a pointer to a static slot holding its address (dyna_salt style).
 * For -p hashes the ciphertext is either inlined hex or loaded/mmap'ed from
 * the named archive file. A SHA-1 of the blob is kept for salt comparison.
 */
static void *get_salt(char *ciphertext)
{
	unsigned int i, type, ex_len;
	static unsigned char *ptr;
	/* extract data from "salt" */
	char *encoded_salt;
	char *saltcopy = strdup(ciphertext);
	char *keep_ptr = saltcopy;
	rarfile *psalt;
	unsigned char tmp_salt[8];
	int inlined = 1;
	SHA_CTX ctx;

	if (!ptr)
		ptr = mem_alloc_tiny(sizeof(rarfile*),sizeof(rarfile*));
	saltcopy += 7;		/* skip over "$RAR3$*" */
	type = atoi(strtokm(saltcopy, "*"));
	encoded_salt = strtokm(NULL, "*");
	for (i = 0; i < 8; i++)
		tmp_salt[i] = atoi16[ARCH_INDEX(encoded_salt[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_salt[i * 2 + 1])];
	if (type == 0) {	/* rar-hp mode */
		char *encoded_ct = strtokm(NULL, "*");
		psalt = mem_calloc(1, sizeof(*psalt)+16);
		psalt->type = type;
		ex_len = 16;
		memcpy(psalt->salt, tmp_salt, 8);
		/* decode the 16-byte known-plaintext block */
		for (i = 0; i < 16; i++)
			psalt->raw_data[i] = atoi16[ARCH_INDEX(encoded_ct[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_ct[i * 2 + 1])];
		psalt->blob = psalt->raw_data;
		psalt->pack_size = 16;
	} else {
		char *p = strtokm(NULL, "*");
		char crc_c[4];
		unsigned long long pack_size;
		unsigned long long unp_size;

		for (i = 0; i < 4; i++)
			crc_c[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
		pack_size = atoll(strtokm(NULL, "*"));
		unp_size = atoll(strtokm(NULL, "*"));
		inlined = atoi(strtokm(NULL, "*"));
		ex_len = pack_size;
		/* load ciphertext. We allocate and load all files here, and they are freed when password found. */
#if HAVE_MMAP
		/* with mmap we only need the tail storage when data is inlined */
		psalt = mem_calloc(1, sizeof(*psalt) + (inlined ? ex_len : 0));
#else
		psalt = mem_calloc(1, sizeof(*psalt) + ex_len);
#endif
		psalt->type = type;
		memcpy(psalt->salt, tmp_salt, 8);
		psalt->pack_size = pack_size;
		psalt->unp_size = unp_size;
		memcpy(psalt->crc.c, crc_c, 4);
		if (inlined) {
			unsigned char *d = psalt->raw_data;
			p = strtokm(NULL, "*");
			for (i = 0; i < psalt->pack_size; i++)
				*d++ = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
			psalt->blob = psalt->raw_data;
		} else {
			FILE *fp;
			char *archive_name = strtokm(NULL, "*");
			long long pos = atoll(strtokm(NULL, "*"));
#if HAVE_MMAP
			if (!(fp = fopen(archive_name, "rb"))) {
				fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno));
				error();
			}
#ifdef DEBUG
			fprintf(stderr, "RAR mmap() len "LLu" offset 0\n", pos + psalt->pack_size);
#endif
			/* map from file start and offset the pointer, so 'pos' need not be page aligned */
			psalt->blob = mmap(NULL, pos + psalt->pack_size, PROT_READ, MAP_SHARED, fileno(fp), 0);
			if (psalt->blob == MAP_FAILED) {
				fprintf(stderr, "Error loading file from " "archive '%s'. Archive possibly " "damaged.\n", archive_name);
				error();
			}
			psalt->blob += pos;
#else
			size_t count;
			if (!(fp = fopen(archive_name, "rb"))) {
				fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno));
				error();
			}
			jtr_fseek64(fp, pos, SEEK_SET);
			count = fread(psalt->raw_data, 1, psalt->pack_size, fp);
			if (count != psalt->pack_size) {
				fprintf(stderr, "Error loading file from archive '%s', expected "LLu" bytes, got "Zu". Archive possibly damaged.\n", archive_name, psalt->pack_size, count);
				error();
			}
			psalt->blob = psalt->raw_data;
#endif
			fclose(fp);
		}
		p = strtokm(NULL, "*");
		psalt->method = atoi16[ARCH_INDEX(p[0])] * 16 + atoi16[ARCH_INDEX(p[1])];
		/* stored CRC is complemented unless the file is "stored" (0x30) */
		if (psalt->method != 0x30)
#if ARCH_LITTLE_ENDIAN
			psalt->crc.w = ~psalt->crc.w;
#else
			psalt->crc.w = JOHNSWAP(~psalt->crc.w);
#endif
	}
	/* hash the blob so equal-sized different blobs compare as different salts */
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, psalt->blob, psalt->pack_size);
	SHA1_Final(psalt->blob_hash, &ctx);
	MEM_FREE(keep_ptr);
#if HAVE_MMAP
	psalt->dsalt.salt_alloc_needs_free = inlined;
#else
	psalt->dsalt.salt_alloc_needs_free = 1;
#endif
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(rarfile, salt);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(rarfile, salt, raw_data, 0);
	memcpy(ptr, &psalt, sizeof(rarfile*));
	return (void*)ptr;
}

/* Make 'salt' (a rarfile**) the current file and stage its 8-byte salt. */
static void set_salt(void *salt)
{
	cur_file = *((rarfile**)salt);
	memcpy(saved_salt, cur_file->salt, 8);
#ifdef RAR_OPENCL_FORMAT
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], cl_salt, CL_FALSE, 0, 8, saved_salt, 0, NULL, NULL), "failed in clEnqueueWriteBuffer saved_salt");
#endif
}

/*
 * Validate a candidate ciphertext line field by field.
 * Returns 1 if the line is a well-formed $RAR3$ hash, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int mode;

	if (strncmp(ciphertext, "$RAR3$*", 7))
		return 0;
	if (!(ctcopy = strdup(ciphertext))) {
		fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL);
		return 0;
	}
	keeptr = ctcopy;
	ctcopy += 7;
	if (!(ptr = strtokm(ctcopy, "*"))) /* -p or -h mode */
		goto error;
	if (strlen(ptr) != 1 || !isdec(ptr))
		goto error;
	mode = atoi(ptr);
	if (mode > 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* salt */
		goto error;
	if (hexlenl(ptr) != 16) /* 8 bytes of salt */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))
		goto error;
	if (mode == 0) {
		if (hexlenl(ptr) != 32) /* 16 bytes of encrypted known plain */
			goto error;
		MEM_FREE(keeptr);
		return 1;
	} else {
		int inlined;
		long long plen, ulen;

		if (hexlenl(ptr) != 8) /* 4 bytes of CRC */
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* pack_size */
			goto error;
		if (strlen(ptr) > 12) { // pack_size > 1 TB? Really?
			static int warn_once_pack_size = 1;
			if (warn_once_pack_size) {
				fprintf(stderr, "pack_size > 1TB not supported (%s)\n", FORMAT_NAME);
				warn_once_pack_size = 0;
			}
			goto error;
		}
		if ((plen = atoll(ptr)) < 16)
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* unp_size */
			goto error;
		if (strlen(ptr) > 12) {
			static int warn_once_unp_size = 1;
			if (warn_once_unp_size) {
				fprintf(stderr, "unp_size > 1TB not supported (%s)\n", FORMAT_NAME);
				warn_once_unp_size = 0;
			}
			goto error;
		}
		if ((ulen = atoll(ptr)) < 1)
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* inlined */
			goto error;
		if (strlen(ptr) != 1 || !isdec(ptr))
			goto error;
		inlined = atoi(ptr);
		if (inlined > 1)
			goto error;
		if (!(ptr = strtokm(NULL, "*"))) /* pack_size / archive_name */
			goto error;
		if (inlined) {
			if (hexlenl(ptr) != plen * 2)
				goto error;
		} else {
			FILE *fp;
			char *archive_name;
			archive_name = ptr;
			if (!(fp = fopen(archive_name, "rb"))) {
				if (!ldr_in_pot)
					fprintf(stderr, "! %s: %s, skipping.\n", archive_name, strerror(errno));
				goto error;
			}
			if (!(ptr = strtokm(NULL, "*"))) /* pos */
				goto error;
			/* We could go on and actually try seeking to pos but this is enough for now */
			fclose(fp);
		}
		if (!(ptr = strtokm(NULL, "*"))) /* method */
			goto error;
	}
	MEM_FREE(keeptr);
	return 1;

error:
#ifdef RAR_DEBUG
	{
		char buf[68];
		strnzcpy(buf, ciphertext, sizeof(buf));
		fprintf(stderr, "rejecting %s\n", buf);
	}
#endif
	MEM_FREE(keeptr);
	return 0;
}

/* Return the plaintext at 'index', converted back from UTF-16LE. */
static char *get_key(int index)
{
	UTF16 tmpbuf[PLAINTEXT_LENGTH + 1];

	memcpy(tmpbuf, &((UTF16*) saved_key)[index * PLAINTEXT_LENGTH], saved_len[index]);
	memset(&tmpbuf[saved_len[index] >> 1], 0, 2);	/* NUL-terminate */
	return (char*) utf16_to_enc(tmpbuf);
}

/* Shift 'n' bits out of 'hold', refilling from *next when fewer than 9 remain.
   Uses/updates locals 'bits', 'hold' and 'next' of the enclosing function. */
#define ADD_BITS(n)	\
{	\
	if (bits < 9) {	\
		hold |= ((unsigned int)*next++ << (24 - bits));	\
		bits += 8;	\
	}	\
	hold <<= n;	\
	bits -= n;	\
}

/*
 * This function is loosely based on JimF's check_inflate_CODE2() from
 * pkzip_fmt. Together with the other bit-checks, we are rejecting over 96%
 * of the candidates without resorting to a slow full check (which in turn
 * may reject semi-early, especially if it's a PPM block)
 *
 * Input is first 16 bytes of RAR buffer decrypted, as-is. It also contain the
 * first 2 bits, which have already been decoded, and have told us we had an
 * LZ block (RAR always use dynamic Huffman table) and keepOldTable was not set.
 *
 * RAR use 20 x (4 bits length, optionally 4 bits zerocount), and reversed
 * byte order.
 */
static MAYBE_INLINE int check_huffman(unsigned char *next)
{
	unsigned int bits, hold, i;
	int left;
	unsigned int ncount[4];
	unsigned char *count = (unsigned char*)ncount;
	unsigned char bit_length[20];
#ifdef DEBUG
	unsigned char *was = next;
#endif

#if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED
	hold = JOHNSWAP(*(unsigned int*)next);
#else
	hold = next[3] + (((unsigned int)next[2]) << 8) + (((unsigned int)next[1]) << 16) + (((unsigned int)next[0]) << 24);
#endif
	next += 4;	// we already have the first 32 bits
	hold <<= 2;	// we already processed 2 bits, PPM and keepOldTable
	bits = 32 - 2;

	/* First, read 20 pairs of (bitlength[, zerocount]) */
	for (i = 0 ; i < 20 ; i++) {
		int length, zero_count;

		length = hold >> 28;
		ADD_BITS(4);
		if (length == 15) {
			zero_count = hold >> 28;
			ADD_BITS(4);
			if (zero_count == 0) {
				bit_length[i] = 15;
			} else {
				zero_count += 2;
				while (zero_count-- > 0 && i < sizeof(bit_length) / sizeof(bit_length[0]))
					bit_length[i++] = 0;
				i--;
			}
		} else {
			bit_length[i] = length;
		}
	}
#ifdef DEBUG
	if (next - was > 16) {
		fprintf(stderr, "*** (possible) BUG: check_huffman() needed %u bytes, we only have 16 (bits=%d, hold=0x%08x)\n", (int)(next - was), bits, hold);
		dump_stuff_msg("complete buffer", was, 16);
		error();
	}
#endif
	/* Count the number of codes for each code length */
	memset(count, 0, 16);
	for (i = 0; i < 20; i++) {
		++count[bit_length[i]];
	}
	count[0] = 0;
	if (!ncount[0] && !ncount[1] && !ncount[2] && !ncount[3])
		return 0; /* No codes at all */

	/* Kraft-style check: the code-length histogram must describe a
	   complete, non-over-subscribed prefix code */
	left = 1;
	for (i = 1; i < 16; ++i) {
		left <<= 1;
		left -= count[i];
		if (left < 0) {
			return 0; /* over-subscribed */
		}
	}
	if (left) {
		return 0; /* incomplete set */
	}
	return 1; /* Passed this check! */
}

/* Any candidate in the batch cracked? */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

/* Was this particular candidate cracked? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* cracked[] is already a definitive verdict, so nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/*
 * Verify 'count' derived keys against the current file and fill cracked[].
 * -hp mode: decrypt one block and compare against the known plaintext.
 * -p stored (0x30): full decrypt + CRC32 of the plaintext.
 * -p compressed: decrypt one block for cheap early rejection (PPM/LZ header
 * sanity and Huffman table check), then fall back to full rar_unpack29().
 */
static inline void check_rar(int count)
{
	unsigned int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		AES_KEY aes_ctx;
		unsigned char *key = &aes_key[index * 16];
		unsigned char *iv = &aes_iv[index * 16];

		AES_set_decrypt_key(key, 128, &aes_ctx);
		/* AES decrypt, uses aes_iv, aes_key and blob */
		if (cur_file->type == 0) {	/* rar-hp mode */
			unsigned char plain[16];

			AES_cbc_encrypt(cur_file->blob, plain, 16, &aes_ctx, iv, AES_DECRYPT);
			cracked[index] = !memcmp(plain, "\xc4\x3d\x7b\x00\x40\x07\x00", 7);
		} else {
			if (cur_file->method == 0x30) {	/* stored, not deflated */
				CRC32_t crc;
				unsigned char crc_out[4];
				unsigned char plain[0x8000];
				unsigned long long size = cur_file->unp_size;
				unsigned char *cipher = cur_file->blob;

				/* Use full decryption with CRC check. Compute CRC of the decompressed plaintext */
				CRC32_Init(&crc);
				while (size) {
					/* decrypt in 32 KB chunks */
					unsigned int inlen = (size > 0x8000) ? 0x8000 : size;
					AES_cbc_encrypt(cipher, plain, inlen, &aes_ctx, iv, AES_DECRYPT);
					CRC32_Update(&crc, plain, inlen);
					size -= inlen;
					cipher += inlen;
				}
				CRC32_Final(crc_out, crc);
				/* Compare computed CRC with stored CRC */
				cracked[index] = !memcmp(crc_out, &cur_file->crc.c, 4);
			} else {
				const int solid = 0;
				unpack_data_t *unpack_t;
				unsigned char plain[20];
				unsigned char pre_iv[16];

				cracked[index] = 0;
				memcpy(pre_iv, iv, 16);	/* keep iv intact for the full check */
				/* Decrypt just one block for early rejection */
				AES_cbc_encrypt(cur_file->blob, plain, 16, &aes_ctx, pre_iv, AES_DECRYPT);
				/* Early rejection */
				if (plain[0] & 0x80) {
					// PPM checks here.
					if (!(plain[0] & 0x20) || // Reset bit must be set
					    (plain[1] & 0x80)) // MaxMB must be < 128
						goto bailOut;
				} else {
					// LZ checks here.
					if ((plain[0] & 0x40) || // KeepOldTable can't be set
					    !check_huffman(plain)) // Huffman table check
						goto bailOut;
				}
				/* Reset stuff for full check */
				AES_set_decrypt_key(key, 128, &aes_ctx);
#ifdef _OPENMP
				unpack_t = &unpack_data[omp_get_thread_num()];
#else
				unpack_t = unpack_data;
#endif
				unpack_t->max_size = cur_file->unp_size;
				unpack_t->dest_unp_size = cur_file->unp_size;
				unpack_t->pack_size = cur_file->pack_size;
				unpack_t->iv = iv;
				unpack_t->ctx = &aes_ctx;
				unpack_t->key = key;
				if (rar_unpack29(cur_file->blob, solid, unpack_t))
					cracked[index] = !memcmp(&unpack_t->unp_crc, &cur_file->crc.c, 4);
bailOut:;
			}
		}
	}
}
GB_unaryop__minv_int64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_int64_int16
// op(A') function: GB_tran__minv_int64_int16

// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 64)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): GB_IMINV_SIGNED comes from GB.h; its exact semantics for the
// signed integer "multiplicative inverse" (incl. division-by-zero handling)
// are defined there, not in this generated file.
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over anz entries; each iteration is independent, so the loop
// is parallelized statically across nthreads.
GrB_Info GB_unop__minv_int64_int16
(
    int64_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which expands the macros
// defined above (GB_CAST_OP etc.) for this type combination.
GrB_Info GB_tran__minv_int64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gate.h
/**
 * @file    gate.h
 * @author  Nader KHAMMASSI - nader.khammassi@gmail.com
 * @date    02-10-15
 * @brief   quantum gate types, coefficient tables and state-update kernels
 */
#pragma once

#ifndef QX_GATE_H
#define QX_GATE_H

#include <map>

#include <xpu.h>

#include <immintrin.h> // avx
#include <emmintrin.h> // sse

#include <core/hash_set.h>
#include <core/linalg.h>
#include <core/register.h>
#include <core/binary_counter.h>
#include <core/kronecker.h>

// #ifndef __BUILTIN_LINALG__
// #include <boost/numeric/ublas/matrix.hpp>
// #endif

#define SQRT_2   (1.4142135623730950488016887242096980785696718753769480731766797379f)
#define R_SQRT_2 (0.7071067811865475244008443621048490392848359376884740365883398690f)

// single-bit helpers on integer masks
#define __bit_test(x,pos) ((x) & (1<<(pos)))
#define __bit_set(x,pos) ((x) | (1<<(pos)))
#define __bit_flip(x,pos) ((x) ^ (1<<(pos)))
#define __bit_reset(x,pos) ((x) & ~(1<<(pos)))

#define __AVX__NO
#define __OP_PREFETCH__

//#define SQRT_2 (1.41421356237309504880f)
//#define R_SQRT_2 (0.70710678118654752440f)

namespace qx
{
   /**
    * types definition
    */
   typedef uint64_t basis_state_t;
   typedef std::map<basis_state_t,complex_t> quantum_state_t;

   // every supported gate / pseudo-gate kind, used for dispatch
   typedef enum __gate_type_t
   {
      __identity_gate__,
      __hadamard_gate__,
      __pauli_x_gate__ ,
      __pauli_y_gate__ ,
      __pauli_z_gate__ ,
      __cnot_gate__    ,
      __toffoli_gate__ ,
      __swap_gate__    ,
      __phase_gate__   ,
      __rx_gate__      ,
      __ry_gate__      ,
      __rz_gate__      ,
      __cphase_gate__  ,
      __t_gate__       ,
      __tdag_gate__    ,
      __sdag_gate__    ,
      __custom_gate__  ,
      __prepx_gate__   ,
      __prepy_gate__   ,
      __prepz_gate__   ,
      __measure_gate__ ,
      __measure_reg_gate__,
      __measure_x_gate__ ,
      __measure_x_reg_gate__,
      __measure_y_gate__ ,
      __measure_y_reg_gate__,
      __ctrl_phase_shift_gate__,
      __parallel_gate__,
      __display__,
      __display_binary__,
      __print_str__,
      __bin_ctrl_gate__,
      __lookup_table__,
      __classical_not_gate__,
      __qft_gate__,
      __prepare_gate__,
      __unitary_gate__
   } gate_type_t;

   /**
    * gates coeffecients
    * (row-major matrix entries for each standard gate)
    */
   const complex_t cnot_c [] __attribute__((aligned(64))) = { complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(0.0),
                                                              complex_t(0.0), complex_t(1.0), complex_t(0.0), complex_t(0.0),
                                                              complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(1.0),
                                                              complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0) };  /* CNOT */

   const complex_t swap_c [] __attribute__((aligned(64))) = { 1.0, 0.0, 0.0, 0.0,
                                                              0.0, 0.0, 1.0, 0.0,
                                                              0.0, 1.0, 0.0, 0.0,
                                                              0.0, 0.0, 0.0, 1.0 };  /* SWAP */

   const complex_t identity_c [] __attribute__((aligned(64))) = { complex_t(1.0), complex_t(0.0),
                                                                  complex_t(0.0), complex_t(1.0) };        /* I */

   const complex_t pauli_x_c  [] __attribute__((aligned(64))) = { complex_t(0.0, 0.0) , complex_t(1.0, 0.0),
                                                                  complex_t(1.0, 0.0) , complex_t(0.0, 0.0) };  /* X */

   const complex_t pauli_y_c  [] __attribute__((aligned(64))) = { complex_t(0.0, 0.0) , complex_t(0.0,-1.0),
                                                                  complex_t(0.0, 1.0) , complex_t(0.0, 0.0) };  /* Y */

   const complex_t pauli_z_c  [] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
                                                                  complex_t(0.0, 0.0) , complex_t(-1.0,0.0) };  /* Z */

   const complex_t phase_c    [] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
                                                                  complex_t(0.0, 0.0) , complex_t(0.0, 1.0) };  /* S */

   const complex_t sdag_gate_c[] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
                                                                  complex_t(0.0, 0.0) , complex_t(0.0, -1.0) };  /* S_dag */

   const complex_t t_gate_c   [] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
                                                                  complex_t(0.0, 0.0) , complex_t(cos(M_PI/4),sin(M_PI/4)) };  /* T */

   const complex_t tdag_gate_c[] __attribute__((aligned(64))) = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0),
                                                                  complex_t(0.0, 0.0) , complex_t(cos(M_PI/4),-sin(M_PI/4)) };  /* T_dag */

   const complex_t hadamard_c [] __attribute__((aligned(64))) = { R_SQRT_2,  R_SQRT_2,
                                                                  R_SQRT_2, -R_SQRT_2 };  /* H */

   // row-major linear index of element (r,c) in an s x s matrix
   #define __rc(r,c,s) (r*s+c)

   /**
    * \brief common abstract gate interface for
    *   all gates implementation.
    */
   class gate
   {
      public:
        virtual int64_t                apply(qu_register& qureg) = 0;
        virtual std::vector<uint64_t>  qubits() = 0;
        virtual std::vector<uint64_t>  control_qubits() = 0;
        virtual std::vector<uint64_t>  target_qubits() = 0;
        virtual gate_type_t            type() = 0;
        virtual std::string            micro_code() { return "# unsupported operation : qubit out of range"; }
        virtual void                   dump() = 0;
        virtual                        ~gate() { };
        virtual void                   set_duration(uint64_t d) { duration = d; }
        virtual uint64_t               get_duration() { return duration; }

      protected:
        uint64_t duration;   // gate duration (units defined by the scheduler; not used here)
   };

   /**
    * \brief rotation in the x-z plane with a given
    *     angle theta (see "Large scale simulation of
    *     error-prone quantum systems" p.39" [Niwa 2002])
    */
   inline cmatrix_t rotation(double theta)
   {
      cmatrix_t r; // (2,2);
      r(0,0) = complex_t(cos(theta),0); r(0,1) = complex_t(-sin(theta),0);
      r(1,0) = complex_t(sin(theta),0); r(1,1) = complex_t(cos(theta),0);
      return r;
   }

   /**
    * \brief phase shift for a given angle phi
    */
   inline cmatrix_t phase(double phi)
   {
      cmatrix_t p; // (2,2);
      p(0,0) = complex_t(1,0); p(0,1) = complex_t(0,0);
      p(1,0) = complex_t(0,0); p(1,1) = complex_t(cos(phi),sin(phi));
      return p;
   }

   /**
    * \brief generate noisy hadamard gate
    *  (rotation(pi/4+eps1) * phase(pi+eps2); exact H when both eps are 0)
    */
   cmatrix_t noisy_hadamard(double epsilon1=0, double epsilon2=0)
   {
#ifdef __BUILTIN_LINALG__
      return mxm(rotation(M_PI/4 + epsilon1), phase(M_PI + epsilon2));
#else
      cmatrix_t rz = rotation(M_PI/4 + epsilon1);
      cmatrix_t p  = phase(M_PI + epsilon2);
      return mxm(rz,p);
#endif
   }

   /**
    * \brief build n x n matrix from an array
    */
   cmatrix_t build_matrix(const complex_t * c, uint64_t n)
   {
      // assert(n==2);
      // TO DO : remove the n parameter
      cmatrix_t m; // (n,n);
      for (int i=0; i<n; i++)
         for (int j=0; j<n; j++)
            m(i,j) = c[i*n+j];
      return m;
   }

   /**
    * sqg_apply
    * (apply a single-qubit gate 'cm' on 'qubit' of 'qureg' by building the
    *  kronecker product  I x U x I  and multiplying the state vector)
    */
#ifdef QX_COMPACT_GATE_OP
   inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg)
   {
      uint64_t n = qureg.size();
      matrix_t m(2,row_t(2,0));
      m[0][0] = cm(0,0); m[0][1] = cm(0,1);
      m[1][0] = cm(1,0); m[1][1] = cm(1,1);

      if (qubit == 0)          // U acts on the least significant qubit
      {
         identity id(1 << (n-1));
         unitary_matrix um(cm.size1(),m);
         kronecker k(&id, &um);
         cvector_t r(qureg.get_data());
         mulmv(k,qureg.get_data(),r);
         qureg = r;
      }
      else if (qubit == n-1)   // U acts on the most significant qubit
      {
         identity id(1 << (n-1));
         unitary_matrix um(cm.size1(),m);
         kronecker k(&um, &id);
         cvector_t r(qureg.get_data());
         mulmv(k,qureg.get_data(),r);
         qureg = r;
      }
      else                     // U sandwiched between two identities
      {
         identity id1(1 << (qubit));
         identity id2(1 << (n-qubit-1));
         unitary_matrix um(cm.size1(),m);
         kronecker k(&id2, &um, &id1);
         cvector_t r(qureg.get_data());
         mulmv(k,qureg.get_data(),r);
         qureg = r;
      }
   }

   /**
    * u on the kth qubit :
    * non-null value in each row of the kronocker matrix:
    *  for each row r :
    *    c1 = r || 000100   // 1 at the n-k bit
    *    c2 = r || 000000
    */
// #elif QX_SPARSE_MV_MUL
#else // QX_SPARSE_MV_MUL

   /* one worker of the sparse matrix-vector product: each output row depends
      on exactly two input amplitudes (target bit cleared / set) */
   uint64_t rw_process(int is, int ie, int s, uint64_t n, uint64_t qubit, const kronecker * m, cvector_t * v, cvector_t * res)
   {
      uint64_t k = n-qubit;
      // println("run : " << is << " .. " << ie);
      complex_t * pv = v->data();
      complex_t * pr = res->data();
      size_t nk = n-k;

      for (uint64_t r=is; r<ie; ++r)
      {
         size_t bc = r;
         size_t c1 = __bit_reset(bc,nk);
         size_t c2 = __bit_set(bc,nk);
         // complex_t s; // = 0;
         pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
      }
      return 0;
   }

   /* parallel sparse kronecker * vector product over all 2^n rows */
   void sparse_mulmv(uint64_t n, uint64_t qubit, const kronecker& m, cvector_t& v, cvector_t& res)
   {
      uint64_t k = n-qubit;
      uint64_t rows = (1 << n);
      uint64_t z = 0;
      xpu::task rw_t(rw_process,0,0,0,n,qubit,&m,&v,&res);
      xpu::parallel_for process(z,rows,1,&rw_t);
      process.run();
   }

   /* generic in-place 2x2 matrix application on amplitude pairs
      (i+stride0, i+stride1) over [start,end) */
   void __apply_m(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
   {
#if 0
      __m128d m00 = matrix[0].xmm;
      __m128d m01 = matrix[1].xmm;
      __m128d m10 = matrix[2].xmm;
      __m128d m11 = matrix[3].xmm;
#endif
      complex_t m00 = matrix[0];
      complex_t m01 = matrix[1];
      complex_t m10 = matrix[2];
      complex_t m11 = matrix[3];
#ifdef USE_OPENMP
#pragma omp parallel for // shared(m00,m01,m10,m11)
#endif
      for(size_t offset = start; offset < end; offset += (1L << (qubit + 1L)))
         for(size_t i = offset; i < offset + (1L << qubit); i++)
         {
            size_t i0 = i + stride0;
            size_t i1 = i + stride1;
            complex_t in0 = state[i0];
            complex_t in1 = state[i1];
            state[i0] = m00*in0+m01*in1;
            state[i1] = m10*in0+m11*in1;
#if 0
            __m128d in0 = state[i0].xmm;
            __m128d in1 = state[i1].xmm;
            state[i0].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m00, in0), xpu::_mm_mulc_pd(m10, in1));
            state[i1].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m10, in1), xpu::_mm_mulc_pd(m11, in1));
#endif
         }
   }

#ifdef __SSE__
// #ifdef __FMA__
   /* specialized Pauli-X kernel: swaps the paired amplitudes (matrix unused) */
   void __apply_x(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
   {
#ifdef USE_OPENMP
#pragma omp parallel for // private(m00,r00,neg)
#endif
      for(size_t offset = start; offset < end; offset += (1L << (qubit + 1L)))
         for(size_t i = offset; i < offset + (1L << qubit); i++)
         {
            size_t i0 = i + stride0;
            size_t i1 = i + stride1;
            __m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm));
            // __m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm));
            state[i0].xmm = state[i1].xmm;
            state[i1].xmm = xin0;
         }
   }
// #else
// #error "FMA not available !"
// #endif // FMA
#else
#error "SSE not available !"
#endif // SSE

#ifdef __SSE__
// #ifdef __FMA__
   /* specialized Hadamard kernel: exploits m00 == m01 == m10 == -m11 (all real),
      so only one broadcast coefficient r00 and a sign flip are needed */
   void __apply_h(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
   {
      __m128d m00 = matrix[0].xmm;
      __m128d r00 = _mm_shuffle_pd(m00,m00,3); // 1 cyc
      __m128d neg = _mm_set1_pd(-0.0f);
#ifdef USE_OPENMP
#pragma omp parallel for // private(m00,r00,neg)
#endif
      for(size_t offset = start; offset < end; offset += (1L << (qubit + 1L)))
         for(size_t i = offset; i < offset + (1L << qubit); i++)
         {
            size_t i0 = i + stride0;
            size_t i1 = i + stride1;
            __m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm));
            __m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm));
            __m128d t2; // = _mm_shuffle_pd(m01,m01,3); // 1 cyc
            __m128d t1 = _mm_mul_pd(xin0,r00); // 5 cyc
#ifdef __FMA__
            __m128d xi0 = _mm_fmadd_pd (xin1,r00, t1); // x2*t2+t1 // 5 cyc
#else
            __m128d xi0 = _mm_mul_pd(xin1,r00);
            xi0 = _mm_add_pd(xi0,t1); // x2*t2+t1 // 5 cyc
#endif // __FMA__
            // t2 = _mm_shuffle_pd(m11,m11,3); // 1 cyc
            t2 = _mm_xor_pd(r00,neg); // 1 cyc (m11=-m00)
#ifdef __FMA__
            __m128d xi1 = _mm_fmadd_pd (xin1, t2, t1); // x2*t2+t1 // 5 cyc
#else
            __m128d xi1 = _mm_mul_pd(xin1,t2);
            xi1 = _mm_add_pd(xi1,t1); // x2*t2+t1 // 5 cyc
#endif
            state[i0].xmm = xi0; // _mm_store_pd((double*)(&state[i0].xmm),xi0);
            state[i1].xmm = xi1; // _mm_store_pd((double*)(&state[i1].xmm),xi1);
         }
   }
// #else
// #error "FMA not available !"
// #endif // FMA
#else
#error "SSE not available !"
#endif // SSE

   /* worker for the by-value kronecker_ui variant, with software prefetch of
      the next row's two source amplitudes.
      NOTE(review): in the __SSE__ branch pr[r] is assigned twice (cc mul-add
      then cr mul-add); the second store wins — confirm this is intentional. */
   uint64_t rw_process_ui(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res)
   {
      uint64_t k = n-qubit;
      // println("run : " << is << " .. " << ie);
      complex_t * pv = v->data();
      complex_t * pr = res->data();
      size_t bc, c1, c2;
      size_t nk = n-k;

      for (uint64_t r=is; r<ie; ++r)
      {
         bc = r;
         c1 = __bit_reset(bc,nk);
         c2 = __bit_set(bc,nk);
         bc++;
#ifdef __OP_PREFETCH__
         _mm_prefetch((void*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
         _mm_prefetch((void*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__

#ifdef __AVX__
         // cxc
         xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
         // cxr
         // pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#elif __SSE__
         // complex_t s; // = 0;
         //pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
         // --- cc mul add ---
         pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
         // --- cr mul add ---
         pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
         // --- f. mul add ---
         // pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#else
         pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
      }
      return 0;
   }

   /* driver for rw_process_ui: sequential or xpu-parallel over all 2^n rows */
   void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res)
   {
      uint64_t k = n-qubit;
      uint64_t rows = (1 << n);
      uint64_t z = 0;
#ifdef SEQUENTIAL
      rw_process_ui(z,rows,1,n,qubit,m,&v,&res);
#else
      xpu::task rw_t(rw_process_ui,0,0,0,n,qubit,m,&v,&res);
      xpu::parallel_for process(z,rows,1,&rw_t);
      process.run();
#endif
   }

   /* worker for the kronecker_iu variant (definition continues past this chunk) */
   uint64_t rw_process_iu(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t * v, cvector_t * res)
   {
      uint64_t k = n-qubit;
      // println("run : " << is << " ..
" << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; size_t nk = n-k; for (uint64_t r=is; r<ie; ++r) { bc = r; c1 = __bit_reset(bc,nk); c2 = __bit_set(bc,nk); bc++; #ifdef __OP_PREFETCH__ _mm_prefetch((void*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0); _mm_prefetch((void*)&pv[__bit_set(bc,nk)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ // cxc xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); // cxr // pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); #elif __SSE__ // complex_t s; // = 0; // pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2)); // --- cc mul add --- pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); // --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm)); // --- f. mul add --- // pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); #else pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2))); #endif } return 0; } void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1 << n); uint64_t z = 0; #ifdef SEQUENTIAL rw_process_iu(z,rows,1,n,qubit,m,&v,&res); #else xpu::task rw_t(rw_process_iu,0,0,0,n,qubit,m,&v,&res); xpu::parallel_for process(z,rows,1,&rw_t); process.run(); #endif } // static xpu::core::os::mutex mtx; uint64_t rw_process_iui(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. 
" << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; size_t nk = n-k; for (uint64_t r=is; r<ie; r++) //+=2) { // 1st bc = r; c1 = __bit_reset(bc,nk); c2 = __bit_set(bc,nk); bc++; #ifdef __OP_PREFETCH__ _mm_prefetch((void*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0); _mm_prefetch((void*)&pv[__bit_set(bc,nk)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ // mtx.lock(); // cxc : xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); // cxr // pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); /* __m256d a; //_mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]); a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c1].xmm,1), 0); a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c2].xmm,1), 1); print("(r="<<r<<") : pr12: "); xpu::dump_m256d(a); // __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm); __m256d b; b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c1).xmm, 1), 1); print("(r="<<r<<") : c1 : "); xpu::dump_m256d(b); b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c2).xmm, 1), 0); print("(r="<<r<<") : c2 : "); xpu::dump_m256d(b); __m256d ab = xpu::_mm256_cmul_pd(a,b); print("(r="<<r<<") : mul: "); xpu::dump_m256d(ab); __m256d abr = _mm256_permute2f128_pd(ab, ab, 1); print("(r="<<r<<") : prm: "); xpu::dump_m256d(abr); ab = _mm256_add_pd(ab,abr); print("(r="<<r<<") : add: "); xpu::dump_m256d(ab); pr[r].xmm = _mm256_extractf128_pd(ab,0); print("(r="<<r<<") : res:"); xpu::dump_m128d(pr[r].xmm); mtx.unlock(); */ #elif __SSE__ /* mtx.lock(); print("(r="<<r<<") : pr1: "); xpu::dump_m128d(pv[c1].xmm); print("(r="<<r<<") : pr2: "); xpu::dump_m128d(pv[c2].xmm); print("(r="<<r<<") : c1 : "); xpu::dump_m128d((m.get(r,c1)).xmm); print("(r="<<r<<") : c2 : "); xpu::dump_m128d((m.get(r,c2)).xmm); */ // --- cxc mul --- pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); // --- cxr mul --- pr[r].xmm = 
_mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm)); // --- fus ma --- // pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm); // pr[r].xmm = xpu::_mm128_mul_add_pc(pv[c1].xmm, pv[c2].xmm, m.get(r,c1).xmm, m.get(r,c2).xmm); /* print("(r="<<r<<") : res: "); xpu::dump_m128d(pr[r].xmm); mtx.unlock(); */ #else pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2))); #endif /* // 2nd c1 = __bit_reset(bc,n-k); c2 = __bit_set(bc,n-k); #ifdef __AVX__NO a = _mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]); // __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm); b = _mm256_insertf128_pd(b,(m.get(bc,c1)).xmm, 1); b = _mm256_insertf128_pd(b,(m.get(bc,c2)).xmm, 0); ab = xpu::_mm256_cmul_pd(a,b); abr = _mm256_permute2f128_pd(ab, ab, 1); ab = _mm256_add_pd(ab,abr); pr[bc].xmm = _mm256_extractf128_pd(ab,0); #elif __SSE__ pr[bc].xmm = _mm_add_pd((pv[c1]*(m.get(bc,c1))).xmm, (pv[c2]*(m.get(bc,c2))).xmm); #else pr[bc] = (pv[c1]*(m.get(bc,c1))) + (pv[c2]*(m.get(bc,c2))); #endif */ } return 0; } void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1 << n); uint64_t z = 0; #ifdef SEQUENTIAL rw_process_iui(z,rows,1,n,qubit,m,&v,&res); #else xpu::task rw_t(rw_process_iui,0,0,0,n,qubit,m,&v,&res); xpu::parallel_for process(z,rows,1,&rw_t); process.run(); #endif } inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg) { uint64_t n = qureg.size(); complex_t * s = qureg.get_data().data(); // cm.dump(); __apply_m(0, (1 << n), qubit, s, 0, (1 << qubit), cm.m); return; } #endif // remove naive tensor computation typedef enum { __x180__, __x90__ , __y180__, __y90__ , __ym90__ } elementary_operation_t; static const char * pulse_lt[][5] = { { " pulse 9,0,0", " pulse 10,0,0", " pulse 11,0,0", " pulse 12,0,0", " pulse 14,0,0" }, { " pulse 0,9,0", " pulse 0,10,0", " pulse 0,11,0", " 
pulse 0,12,0", " pulse 0,14,0" }, { " pulse 0,0,9", " pulse 0,0,10", " pulse 0,0,11", " pulse 0,0,12", " pulse 0,0,14" }, }; /** * \brief hadamard gate: * * | 1 1| * 1/sqrt(2) | | * | 1 -1| */ class hadamard : public gate { private: uint64_t qubit; cmatrix_t m; public: hadamard(uint64_t qubit) : qubit(qubit) //,m((complex_t*)hadamard_c) { m = build_matrix(hadamard_c,2); } int64_t apply(qu_register& qureg) { size_t qs = qureg.states(); complex_t * data = qureg.get_data().data(); // sqg_apply(m,qubit,qureg); __apply_h(0, qs, qubit, data, 0, (1 << qubit), hadamard_c); // __apply_m(0, qs, qubit, data, 0, (1 << qubit), hadamard_c); //__apply_h_old(0, qs, qubit, data, 0, (1 << qubit), hadamard_c); // qureg.set_binary(qubit,__state_unknown__); qureg.set_measurement_prediction(qubit,__state_unknown__); return 0; } std::string micro_code() { /** | wait 5 | y90 q0 --> { pulse 12,0,0 } | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y90__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __hadamard_gate__; } void dump() { println(" [-] hadamard(q=" << qubit << ")"); } }; void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); complex_t * p = amp.data(); for (size_t i=__bit_set(0,bit); i<(1<<size); i += (1 << (bit+1))) for (size_t j=0; j<(1<<bit); j++) { size_t v = i+j+offset; /* #ifdef __SSE__ __m128d x = _mm_load_pd((const double *)&p[v]); __m128d y = _mm_load_pd((const double *)&p[__bit_reset(v,trg)]); _mm_store_pd((double 
*)&p[__bit_reset(v,trg)],x); _mm_store_pd((double *)&p[v],y); #else */ std::swap(amp[v], amp[__bit_reset(v,trg)]); // println("swap("<<v<<","<<__bit_reset(v,trg)<<")"); // #endif } } int cx_worker(int cs, int ce, int s, cvector_t * p_amp, size_t bit1, size_t bit2, size_t trg, size_t ctrl) { cvector_t &amp = * p_amp; // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t); size_t step=(1 << (bit1+1)); size_t b = cs; size_t e = ce; size_t offset = __bit_set(0,bit1); //for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1))) //__swap(amp,bit1,bit2,trg,ctrl,i); for (size_t i=b; i<e; i++) __swap(amp,bit1,bit2,trg,ctrl,offset+(i*step)); return 0; } /** * \brief controlled-not gate: * * | 1 0 0 0 | * | 0 1 0 0 | * | 0 0 0 1 | * | 0 0 1 1 | */ class cnot : public gate { private: uint64_t control_qubit; uint64_t target_qubit; cmatrix_t m; public: cnot(uint64_t ctrl_q, uint64_t target_q) : control_qubit(ctrl_q), target_qubit(target_q) { // m = build_matrix(cnot_c,4); // stack smaching } // #define CG_HASH_SET //#define CG_MATRIX #ifndef CG_BC #ifndef CG_MATRIX #define CG_BC #endif #endif // CG_BC int64_t apply(qu_register& qreg) { // println("cnot " << control_qubit << "," << target_qubit); #ifdef CG_MATRIX uint64_t sn = qreg.states(); uint64_t qn = qreg.size(); uint64_t cq = control_qubit; uint64_t tq = target_qubit; cmatrix_t i = cidentity_t(sn); perm_t p = perms(qn,cq,tq); // dump_matrix(i); for (perm_t::iterator it = p.begin(); it != p.end(); it++) { i(it->first,it->second) = 1; i(it->second,it->first) = 1; i(it->first, it->first) = 0; i(it->second,it->second) = 0; } // dump_matrix(i); qreg = mxv(i, qreg.get_data()); #elif defined(CG_BC) uint64_t sn = qreg.states(); uint64_t qn = qreg.size(); uint64_t cq = control_qubit; uint64_t tq = target_qubit; cvector_t& amp = qreg.get_data(); // perms(qn,cq,tq,amp); // #if 0 size_t b1 = std::max(cq,tq); size_t b2 = std::min(cq,tq); size_t steps = ((1 << qn)-(__bit_set(0,b1)))/(1 << (b1+1))+1; /* 
println("from=" << (__bit_set(0,b1))); println("to=" << (1 << qn)); println("s=" << (1 << (b1+1))); println("steps=" << steps); */ if (qn<17) fast_cx(amp, qn, b1, b2, tq, cq); else { #ifdef USE_OPENMP #pragma omp parallel for for (size_t i=0; i<steps; ++i) cx_worker(i,i+1,1,&amp,b1,b2,(size_t)tq,(size_t)cq); #else xpu::task t(cx_worker,0,0,0,&amp,b1,b2,(size_t)tq,(size_t)cq); xpu::parallel_for fswp(0, steps, 1, &t); fswp.run(); #endif } // #endif #elif defined(CG_HASH_SET) uint64_t j = control_qubit+1; uint64_t k = target_qubit+1; uint64_t k2 = (1 << (k-1)); uint64_t j2 = (1 << (j-1)); uint64_t r_size = qreg.states(); xpu::container::hash_set<uint64_t> swap_set; // find swap pairs for (uint64_t t = 0; t < r_size; t++) { if ((t & j2) <= 0) continue; if (swap_set.find(t-k2) == swap_set.end()) swap_set.insert(t); } int64_t t2; cvector_t& amp = qreg.get_data(); complex_t c1, c2; for (xpu::container::hash_set<uint64_t>::iterator t = swap_set.begin(); t != swap_set.end(); ++t) { int64_t _t = *t; t2 = (_t + k2 < r_size) ? 
_t + k2 : _t - k2; c1 = amp(_t); c2 = amp(t2); std::swap(c1, c2); amp(_t) = c1; amp(t2) = c2; } //qreg=amp; #endif // CG_HASH_SET // if (qreg.get_binary(control_qubit) == __state_1__) if (qreg.get_measurement_prediction(control_qubit) == __state_1__) qreg.flip_binary(target_qubit); //else if (qreg.get_binary(control_qubit) == __state_unknown__) else if (qreg.get_measurement_prediction(control_qubit) == __state_unknown__) qreg.set_measurement_prediction(target_qubit,__state_unknown__); // qreg.set_binary(target_qubit,__state_unknown__); return 0; } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(control_qubit); r.push_back(target_qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(control_qubit); return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; } gate_type_t type() { return __cnot_gate__; } void dump() { println(" [-] cnot(ctrl_qubit=" << control_qubit << ", target_qubit=" << target_qubit << ")"); } private: #if 0 void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); for (size_t i=__bit_set(0,bit); i<(1<<size); i += (1 << (bit+1))) for (size_t j=0; j<(1<<bit); j++) { size_t v = i+j+offset; std::swap(amp[v], amp[__bit_reset(v,trg)]); // println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")"); } } #endif void fast_cx(cvector_t& amp, size_t size, size_t bit1, size_t bit2, size_t trg, size_t ctrl) { /* println("from=" << (__bit_set(0,bit1))); println("to=" << (1 << size)); println("s=" << (1 << (bit1+1))); */ for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1))) __swap(amp,bit1,bit2,trg,ctrl,i); } }; template<typename T> void swap_if_greater(T& a, T& b) { if (a > b) { T tmp(a); a = b; b = tmp; } } template<typename T> void sort(T& a, T& b, T& c) { swap_if_greater(a, b); swap_if_greater(a, c); 
swap_if_greater(b, c); } /** * \brief toffoli gate: * * | 1 0 0 0 | * | 0 1 0 0 | * | 0 0 0 1 | * | 0 0 1 1 | */ class toffoli : public gate { private: uint64_t control_qubit_1; uint64_t control_qubit_2; uint64_t target_qubit; public: toffoli(uint64_t ctrl_q1, uint64_t ctrl_q2, uint64_t target_q) : control_qubit_1(ctrl_q1), control_qubit_2(ctrl_q2), target_qubit(target_q) { } int64_t apply(qu_register& qreg) { uint64_t sn = qreg.states(); uint64_t qn = qreg.size(); uint64_t cq1 = control_qubit_1; uint64_t cq2 = control_qubit_2; uint64_t tq = target_qubit; cvector_t& amp = qreg.get_data(); //println("\ntoffoli " << cq1 << "," << cq2 << "," << tq); #if 1 size_t c1=cq1; size_t c2=cq2; size_t c3=tq; size_t t=tq; size_t size=qn; sort(c1,c2,c3); #ifdef USE_OPENMP #pragma omp parallel for #endif for (size_t i=__bit_set(__bit_set(__bit_set(0,c1),c2),c3); i<(1<<size); i += (1 << (c3+1))) for (size_t j=i; j<(i+(1<<c3)); j += (1 << (c2+1))) for (size_t k=j; k<(j+(1<<c2)); k+=(1 << (c1+1))) for (size_t l=k; l<(k+(1<<(c1))); l++) { std::swap(amp[__bit_set(l,t)],amp[__bit_reset(l,t)]); // println("swap : " << __bit_set(l,t) << "," << __bit_reset(l,t)); } #else std::vector<uint64_t> done(sn, 0); perm_t p = perms(qn,cq1,cq2,tq); uint64_t p1,p2; for (perm_t::iterator it = p.begin(); it != p.end(); it++) { p1 = it->first; p2 = it->second; if (!(done[p1] || done[p2])) //if (!(done[p1])) { // std::swap(amp(p1),amp(p2)); // ublas std::swap(amp[p1],amp[p2]); //println("swap : " << p1 << "," << p2); done[p1] = 1; done[p2] = 1; } } #endif if ((qreg.get_measurement_prediction(control_qubit_1) == __state_1__) && (qreg.get_measurement_prediction(control_qubit_2) == __state_1__) ) { qreg.flip_binary(target_qubit); } else if ((qreg.get_measurement_prediction(control_qubit_1) == __state_unknown__) || (qreg.get_measurement_prediction(control_qubit_2) == __state_unknown__) ) { qreg.set_measurement_prediction(target_qubit,__state_unknown__); // qreg.set_binary(target_qubit,__state_unknown__); } 
return 0; } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(control_qubit_1); r.push_back(control_qubit_2); r.push_back(target_qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(control_qubit_1); r.push_back(control_qubit_2); return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; } gate_type_t type() { return __toffoli_gate__; } void dump() { println(" [-] toffoli(ctrl_qubit_1=" << control_qubit_1 << ", ctrl_qubit_2=" << control_qubit_2 << ", target_qubit=" << target_qubit << ")"); } }; int fliper(int cs, int ce, int s, uint64_t q, cvector_t * p_amp) { cvector_t &amp = * p_amp; for (int i=cs; i<ce; ++i) { if (__bit_test(i,q)) std::swap(amp[i],amp[__bit_flip(i,q)]); } return 0; } #define __swap_xmm(x,y) { x = _mm_xor_pd(x,y); y = _mm_xor_pd(y,x); x = _mm_xor_pd(x,y); } void fast_flip(uint64_t q, uint64_t n, cvector_t& amp) { complex_t * x = amp.data(); #ifdef USE_OPENMP #pragma omp parallel for #endif for (size_t i=0; i<(1 << n); i+=(1 << (q+1))) for (size_t j=i; j<(i+(1 << q)); j++) //__swap_xmm(x[j].xmm,x[__bit_flip(j,q)].xmm); std::swap(x[j].xmm,x[__bit_flip(j,q)].xmm); } void flip(uint64_t q, uint64_t n, cvector_t& amp) { uint64_t nn = (1 << n); uint64_t p1, p2; std::bitset<MAX_QB_N> b; // perm_t res; b.reset(); b.set(q); uint64_t bc = b.to_ulong(); while (bc < nn) { b.set(q); p1 = b.to_ulong(); b.flip(q); p2 = b.to_ulong(); if (p2<p1) std::swap(amp[p1],amp[p2]); b.flip(q); b = inc(b); b.set(q); bc = b.to_ulong(); } //return res; } /** * \brief identity : * * | 1 0 | * | 0 1 | * */ class identity : public gate { private: uint64_t qubit; cmatrix_t m; public: identity(uint64_t qubit) : qubit(qubit) { m = build_matrix(identity_c,2); } int64_t apply(qu_register& qreg) { return 0; } std::string micro_code() { if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; // uc << pulse_lt[qubit][__x180__] << 
"\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] identity(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __identity_gate__; } }; /** * \brief pauli-x : * * | 0 1 | * | 1 0 | * */ class pauli_x : public gate { private: uint64_t qubit; cmatrix_t m; public: pauli_x(uint64_t qubit) : qubit(qubit) { m = build_matrix(pauli_x_c,2); } int64_t apply(qu_register& qreg) { // #define FAST_FLIP #ifdef FAST_FLIP uint64_t qn = qreg.size(); cvector_t& amp = qreg.get_data(); // flip(qubit,qn,amp); fast_flip(qubit,qn,amp); /* xpu::task flip_t(fliper,0,0,0,qubit,&amp); xpu::parallel_for parallel_flip(0,(1 << qn),1,&flip_t); parallel_flip.run(); */ #else uint64_t n = qreg.size(); complex_t * s = qreg.get_data().data(); // cm.dump(); __apply_m(0, (1 << n), qubit, s, 0, (1 << qubit), m.m); // sqg_apply(m,qubit,qreg); #endif // FAST_FLIP qreg.flip_binary(qubit); return 0; } std::string micro_code() { /** | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] pauli-x(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __pauli_x_gate__; } }; /** * \brief pauli-y : * * | 0 -i | * | i 0 | */ class pauli_y : public gate { private: uint64_t qubit; cmatrix_t m; public: pauli_y(uint64_t qubit) : qubit(qubit) { m = build_matrix(pauli_y_c,2); } int64_t 
apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.flip_binary(qubit); return 0; } std::string micro_code() { /** | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] pauli-y(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __pauli_y_gate__; } }; /** * \brief pauli-z : * * | 1 0 | * | 0 -1 | */ class pauli_z : public gate { private: uint64_t qubit; cmatrix_t m; public: pauli_z(uint64_t qubit) : qubit(qubit) { m = build_matrix(pauli_z_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } std::string micro_code() { /** | wait 5 | x180 q0 --> { pulse 9,0,0 } */ if (qubit > 2) return "# unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y180__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__x180__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] pauli-z(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __pauli_z_gate__; } }; /** * \brief phase : * * | 1 0 | * | 0 i | */ class phase_shift : public gate { private: uint64_t qubit; cmatrix_t m; public: phase_shift(uint64_t qubit) : qubit(qubit) { m = build_matrix(phase_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } std::string micro_code() { if (qubit > 2) return "# 
unsupported operation : qubit out of range"; std::stringstream uc; uc << pulse_lt[qubit][__y90__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__x90__] << "\n"; uc << " wait 4 \n"; uc << pulse_lt[qubit][__ym90__] << "\n"; uc << " wait 4 \n"; return uc.str(); } void dump() { println(" [-] phase(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __phase_gate__; } }; /** * \brief S dag gate */ class s_dag_gate : public gate { private: uint64_t qubit; cmatrix_t m; public: s_dag_gate(uint64_t qubit) : qubit(qubit) { m = build_matrix(sdag_gate_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } void dump() { println(" [-] s_dag_gate(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __sdag_gate__; } }; /** * \brief T gate */ class t_gate : public gate { private: uint64_t qubit; cmatrix_t m; public: t_gate(uint64_t qubit) : qubit(qubit) { m = build_matrix(t_gate_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } void dump() { println(" [-] t_gate(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __t_gate__; } }; /** * \brief T dag gate */ class t_dag_gate : public gate { private: uint64_t qubit; cmatrix_t m; 
public: t_dag_gate(uint64_t qubit) : qubit(qubit) { m = build_matrix(tdag_gate_c,2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); return 0; } void dump() { println(" [-] t_dag_gate(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __tdag_gate__; } }; /** * phase factoring */ void reset_gphase(cmatrix_t& m) { double n = m(0,0).norm(); if (n > 10e-9) { complex_t p(m(0,0).re/n,m(0,0).im/n); m(0,0) /= p; m(0,1) /= p; m(1,0) /= p; m(1,1) /= p; } else { n = m(0,1).norm(); complex_t p(m(0,1).re/n,m(0,1).im/n); m(0,0) /= p; m(0,1) /= p; m(1,0) /= p; m(1,1) /= p; } double n1 = std::sqrt(m(0,0).norm()+m(1,0).norm()); double n2 = std::sqrt(m(0,1).norm()+m(1,1).norm()); m(0,0) /= n1; m(0,1) /= n2; m(1,0) /= n1; m(1,1) /= n2; } /** * | (cos(?/2) -e(i?)sin(?/2)) | * general gate u = | | * | (e(i?)sin(?/2) e(i?+i?)cos(?/2)) | */ class unitary : public gate { private: uint64_t qubit; double angle[3]; cmatrix_t m; public: unitary(uint64_t qubit, double angle[3]) : qubit(qubit) { // m.resize(2,2); m(0,0) = cos(angle[1]/2); m(0,1) = complex_t(-cos(angle[2]/2),-sin(angle[2]/2))*sin(angle[1]/2); m(1,0) = complex_t(cos(angle[3]/2),sin(angle[3]/2))*sin(angle[1]/2) ; m(1,1) = complex_t(cos((angle[3]/2)+(angle[2]/2)),sin((angle[3]/2)+(angle[2]/2)))*cos(angle[1]/2); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); // qreg.set_binary(qubit,__state_unknown__); return 0; } double get_angle() { return *angle; } void dump() { println(" [-] unitary(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { 
std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __unitary_gate__; } }; /** * \brief rotation-x : */ class rx : public gate { private: uint64_t qubit; double angle; cmatrix_t m; public: rx(uint64_t qubit, double angle) : qubit(qubit), angle(angle) { // m.resize(2,2); m(0,0) = cos(angle/2); m(0,1) = complex_t(0,-sin(angle/2)); m(1,0) = complex_t(0,-sin(angle/2)); m(1,1) = cos(angle/2); reset_gphase(m); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); // qreg.set_binary(qubit,__state_unknown__); return 0; } void dump() { println(" [-] rx(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __rx_gate__; } }; /** * \brief rotation-y : */ class ry : public gate { private: uint64_t qubit; double angle; cmatrix_t m; public: ry(uint64_t qubit, double angle) : qubit(qubit), angle(angle) { // m.resize(2,2); m(0,0) = cos(angle/2); m(0,1) = -sin(angle/2); m(1,0) = sin(angle/2); m(1,1) = cos(angle/2); // reset_gphase(m); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); //qreg.set_binary(qubit,__state_unknown__); return 0; } void dump() { println(" [-] ry(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __ry_gate__; } }; /** * \brief rotation-z : */ 
class rz : public gate { private: uint64_t qubit; double angle; cmatrix_t m; public: rz(uint64_t qubit, double angle) : qubit(qubit), angle(angle) { // m.resize(2,2); m(0,0) = complex_t(cos(-angle/2), sin(-angle/2)); m(0,1) = 0; m(1,0) = 0; m(1,1) = complex_t(cos(angle/2), sin(angle/2)); reset_gphase(m); } int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); //qreg.set_binary(qubit,__state_unknown__); return 0; } void dump() { println(" [-] rz(qubit=" << qubit << ", angle=" << angle << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } gate_type_t type() { return __rz_gate__; } }; void __shift(cvector_t& amp, size_t size, size_t bit, complex_t p, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); complex_t * x = amp.data(); // println(">>>> " << p); for (size_t i=__bit_set(0,bit); i<(1<<size); i += (1 << (bit+1))) for (size_t j=0; j<(1<<bit); j++) { size_t v = i+j+offset; // amp[v] *= p; // println(" before mul : " << x[v]); x[v] *= p; // println(" after mul : " << x[v]); // println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")"); } } void __shift(complex_t * x, size_t size, size_t bit, complex_t p, size_t offset=0) { // println("bit=" << bit); // println("ctrl=" << ctrl); for (size_t i=__bit_set(0,bit); i<(1<<size); i += (1 << (bit+1))) for (size_t j=0; j<(1<<bit); j++) { size_t v = i+j+offset; // amp[v] *= p; x[v] *= p; // println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")"); } } int shift_worker(int cs, int ce, int s, cvector_t * p_amp, size_t bit1, size_t bit2, complex_t p) { cvector_t &amp = * p_amp; // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t); size_t 
step=(1 << (bit1+1)); size_t b = cs; size_t e = ce; size_t offset = __bit_set(0,bit1); //for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1))) //__swap(amp,bit1,bit2,trg,ctrl,i); for (size_t i=b; i<e; i++) __shift(amp,bit1,bit2,p,offset+(i*step)); return 0; } uint64_t qft_1st_fold_worker(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. " << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; for (uint64_t r=is; r<ie; ++r) { bc = r; c1 = __bit_reset(bc,n-k); c2 = __bit_set(bc,n-k); #ifdef __OP_PREFETCH__ _mm_prefetch((void*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0); _mm_prefetch((void*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ //NO xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); #else // complex_t s; // = 0; //pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2)); pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); #endif } size_t bit2 = qubit; for (size_t j=qubit+1; j<n; ++j) { complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j- qubit)))); size_t bit1 = j; size_t step=(1 << (bit1+1)); size_t offset = __bit_set(0,bit1); for (size_t i=is; i<ie; i++) { // println("i=" << i*step); __shift(pr,bit1,bit2,p,offset+(i*step)); } } return 0; } void qft_1st_fold(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1 << n); uint64_t z = 0; //xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res); //xpu::parallel_for process(z,rows,1,&qf_t); //process.run(); qft_1st_fold_worker(0,rows,1,n,qubit,m,&v,&res); } uint64_t qft_nth_fold_worker(int is, int ie, int s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res) { uint64_t k = n-qubit; // println("run : " << is << " .. 
" << ie); complex_t * pv = v->data(); complex_t * pr = res->data(); size_t bc, c1, c2; for (uint64_t r=is; r<ie; ++r) { bc = r; c1 = __bit_reset(bc,n-k); c2 = __bit_set(bc,n-k); #ifdef __OP_PREFETCH__ _mm_prefetch((void*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0); _mm_prefetch((void*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0); #endif // __OP_PREFETCH__ #ifdef __AVX__ //NO xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]); #else // complex_t s; // = 0; //pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2)); pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm); #endif } size_t bit2 = qubit; for (size_t j=qubit+1; j<n; ++j) { complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j-qubit)))); size_t bit1 = j; size_t step=(1 << (bit1+1)); size_t offset = __bit_set(0,bit1); for (size_t i=is; i<ie; i++) { __shift(pr,bit1,bit2,p,offset+(i*step)); } } return 0; } void qft_nth_fold(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res) { uint64_t k = n-qubit; uint64_t rows = (1 << n); uint64_t z = 0; //xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res); //xpu::parallel_for process(z,rows,1,&qf_t); //process.run(); qft_nth_fold_worker(0,rows,1,n,qubit,m,&v,&res); } int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out, kronecker_ui kr, size_t qubit) { complex_t * in = p_in.data(); complex_t * out = p_out.data(); cvector_t & amp = p_out; // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t); size_t b = cs; size_t e = ce; rw_process_ui(cs, ce, s, n, qubit, kr, &p_in, &p_out); // H size_t bit2 = qubit; for (size_t j=qubit+1; j<n; ++j) { complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j- qubit)))); size_t bit1 = j; size_t step=(1 << (bit1+1)); size_t offset = __bit_set(0,bit1); for (size_t i=b; i<e; i++) { println("i=" << i*step); __shift(amp,bit1,bit2,p,offset+(i*step)); } } return 0; } int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, 
cvector_t& p_out, kronecker_iui kr, size_t qubit) { complex_t * in = p_in.data(); complex_t * out = p_out.data(); cvector_t & amp = p_out; // xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t); size_t b = cs; size_t e = ce; rw_process_iui(cs, ce, s, n, qubit, kr, &p_in, &p_out); // H return 0; size_t bit2 = qubit; for (size_t j=qubit+1; j<n; ++j) { complex_t p(cos(M_PI/(1 << (j-qubit))), sin(M_PI/(1 << (j- qubit)))); size_t bit1 = j; size_t step=(1 << (bit1+1)); size_t offset = __bit_set(0,bit1); for (size_t i=b; i<e; i++) { __shift(p_out,bit1,bit2,p,offset+(i*step)); } } return 0; } /** * \brief qft */ class qft : public gate { private: std::vector<uint64_t> qubit; cmatrix_t hm; public: qft(std::vector<uint64_t> qubit) : qubit(qubit) { hm = build_matrix(hadamard_c,2); } int64_t apply(qu_register& qreg) { size_t n = qreg.size(); size_t s = qreg.states(); cvector_t& in = qreg.get_data(); cvector_t& out = qreg.get_aux(); // kronecker_ui kui(hm,2,(1 << (n-1))); kronecker_ui kui(hadamard_c,2,(1 << (n-1))); qft_1st_fold(n, 0, kui, in, out); for (size_t i=1; i<n-1; ++i) { size_t q = qubit[i]; // kronecker_iui kiui(hm, 2, (1 << (n-q-1)), (1 << (q))); kronecker_iui kiui(hadamard_c, 2, (1 << (n-q-1)), (1 << (q))); qft_nth_fold(n, 0, kiui, in, out); } in.swap(out); return 0; #if 0 // 1st fold qft_worker(0, s, 1, n, in, out, kronecker_ui(m,2,s-2), 0); return 0; // ith fold for (size_t i=1; i<qubit.size(); ++i) { size_t q = qubit[i]; kronecker_iui k(m, 2, (1 << (n-q-1)), (1 << (q))); qft_worker(0, qreg.states(), 1, qreg.size(), (qreg.get_data()), (qreg.get_aux()), k, q); } // last fold kronecker_iu k(m,2,(1 << (n-1))); sparse_mulmv(n,qubit[n-1],k,qreg.get_data(),qreg.get_aux()); in.swap(out); return 0; #endif } void dump() { print(" [-] qft("); for (size_t i=0; i<(qubit.size()-1); ++i) print("q" << qubit[i] << ","); println("q" << qubit[qubit.size()-1] << ")"); } std::vector<uint64_t> qubits() { return qubit; } std::vector<uint64_t> control_qubits() { return 
qubit; } std::vector<uint64_t> target_qubits() { return qubit; } gate_type_t type() { return __qft_gate__; } }; /** * phase shifter */ void __apply_cm(complex_t * state, complex_t m[2][2], std::size_t i11, std::size_t i12, std::size_t i13, std::size_t i21, std::size_t i22, std::size_t i23, std::size_t i31, std::size_t i32, std::size_t ish ) { complex_t m00 = m[0][0], m01 = m[0][1], m10 = m[1][0], m11 = m[1][1]; for(std::size_t r1 = i11; r1 < i12; r1 += i13) { #ifdef USE_OPENMP // #pragma omp parallel for #endif for(std::size_t r2 = r1 + i21; r2 < r1 + i22; r2 += i23) { for(std::size_t ind0 = r2 + i31; ind0 < r2 + i32; ind0++) { std::size_t ind1 = ind0 + ish; complex_t in0 = state[ind0], in1 = state[ind1]; state[ind0] = m00 * in0 + m01 * in1; state[ind1] = m10 * in0 + m11 * in1; } } } } /** * \brief controlled phase shift by arbitrary phase angle or (2*pi/(2^(k=ctrl-target))) */ class ctrl_phase_shift : public gate { private: uint64_t ctrl_qubit; uint64_t target_qubit; complex_t z; complex_t m[2][2]; double phase; protected: void build_operator() { m[0][0] = complex_t(cos(-phase/2), sin(-phase/2)); m[0][1] = 0; m[1][0] = 0; m[1][1] = complex_t(cos(phase/2), sin(phase/2)); double n = m[0][0].norm(); if (n > 10e-9) { complex_t p(m[0][0].re/n,m[0][0].im/n); m[0][0] /= p; m[0][1] /= p; m[1][0] /= p; m[1][1] /= p; } else { n = m[0][1].norm(); complex_t p(m[0][0].re/n,m[0][0].im/n); m[0][0] /= p; m[0][1] /= p; m[1][0] /= p; m[1][1] /= p; } } public: /** * ctor (q) */ ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit) { phase = 2*M_PI/(1 << (ctrl_qubit - target_qubit)); build_operator(); } /** * ctor (k) */ ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, size_t k) : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit) { phase = 2*M_PI/(1 << k); build_operator(); } /** * ctor (p) */ ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, double angle) : ctrl_qubit(ctrl_qubit), 
target_qubit(target_qubit) { phase = angle; build_operator(); } int64_t apply(qu_register& qreg) { uint64_t n = qreg.size(); complex_t * s = qreg.get_data().data(); size_t c = ctrl_qubit; size_t t = target_qubit; if (c > t) __apply_cm(qreg.get_data().data(), m, 0, (1 << n), 1l << (c+1l), 1l << c, 1l << (c+1l), 1l << (t+1l), 0l, 1l << t, 1l << t); else __apply_cm(qreg.get_data().data(), m, 0, (1 << n), 1l << (t+1l), 0l, 1l << t, 1l << (c+1l), 1l << c, 1l<< (c+1l), 1l << t); return 0; } void dump() { println(" [-] ctrl_phase_shift(ctrl_qubit=" << ctrl_qubit << ", target_qubit: " << target_qubit << ", phase = (" << z.re << ", i." << z.im << ") )"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); r.push_back(target_qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; } gate_type_t type() { return __ctrl_phase_shift_gate__; } }; /** * \brief swap : * * | 1 0 0 0 | * | 0 0 1 0 | * | 0 1 0 0 | * | 0 0 0 1 | */ class swap : public gate { private: uint64_t qubit1; uint64_t qubit2; // cmatrix_t m; public: swap(uint64_t qubit1, uint64_t qubit2) : qubit1(qubit1), qubit2(qubit2) { // m = build_matrix(swap_c,4); } int64_t apply(qu_register& qreg) { cnot(qubit1,qubit2).apply(qreg); cnot(qubit2,qubit1).apply(qreg); cnot(qubit1,qubit2).apply(qreg); return 0; } void dump() { println(" [-] swap(q1=" << qubit1 << ", q2=" << qubit2 << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit1); r.push_back(qubit2); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(qubit1); r.push_back(qubit2); return r; } gate_type_t type() { return __swap_gate__; } }; /** * \brief cphase */ class cphase : public gate { private: uint64_t ctrl_qubit; 
uint64_t target_qubit; public: cphase(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit) { } int64_t apply(qu_register& qreg) { hadamard(target_qubit).apply(qreg); cnot(ctrl_qubit,target_qubit).apply(qreg); hadamard(target_qubit).apply(qreg); return 0; } void dump() { println(" [-] cphase(ctrl_qubit=" << ctrl_qubit << ", target_qubit=" << target_qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); r.push_back(target_qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; r.push_back(ctrl_qubit); return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; r.push_back(target_qubit); return r; } gate_type_t type() { return __cphase_gate__; } }; /** * \brief custom matrix gate * */ class custom : public gate { private: // std::vector<uint64_t> qubits; uint64_t qubit; cmatrix_t m; public: // #ifdef __BUILTIN_LINALG__ // custom(std::vector<uint64_t> qubits, qx::linalg::matrix<complex_t> m) : qubits(qubits), m(m) // #else custom(uint64_t qubit, cmatrix_t m) : qubit(qubit), m(m) // #endif { // uint64_t size = 1 << qubits.size(); // if (size != m.size1() || size != m.size2()) // println("[x] error: cutom gate : the matrix size do not match the number of qubits !"); // verify also that the matrix is unitary // #ifdef __BUILTIN_LINALG__ // cmatrix_t ctr(m.size2(),m.size1()); // qx::linalg::matrix<complex_t> ctr(m.size2(),m.size1()); // for (uint64_t i=0; i<m.size2(); ++i) // for (uint64_t j=0; j<m.size1(); ++j) // ctr(i,j) = m(j,i).conj(); // // cmatrix_t mxctr = mxm(m,ctr); // qx::linalg::matrix<complex_t> mxctr = mxm(m,ctr); // qx::linalg::identity_matrix<complex_t> id(m.size1()); // #else // cmatrix_t mxctr = mxm(m,ublas::trans(conj(m))); // ublas::identity_matrix<complex_t> id(m.size1()); // #endif // #ifdef __BUILTIN_LINALG__ // if (qx::linalg::equals(mxctr,id)) // #else // if (equals(mxctr,id)) // #endif // println("[x] error: 
custom gate : the specified matrix is not unitary !"); } /** * apply */ int64_t apply(qu_register& qreg) { sqg_apply(m,qubit,qreg); qreg.set_measurement_prediction(qubit,__state_unknown__); return 0; } /** * dump */ void dump() { println(" [-] custom matrix on qubit " << qubit); // println(" [-] custom(qubits=" << qubits << ", matrix=" << m << ")"); } /** * type */ gate_type_t type() { return __custom_gate__; } }; int p1_worker(uint64_t cs, uint64_t ce, uint64_t s, double * p1, uint64_t qubit, xpu::lockable * l, cvector_t * p_data) { cvector_t &data = * p_data; double local_p1 = 0; for (uint64_t i=cs; i<ce; ++i) { i = __bit_set(i,qubit); if (i<ce) local_p1 += data[i].norm(); //std::norm(data[i]); // if (__bit_test(i,qubit)) // local_p1 += std::norm(data[i]); } l->lock(); // println("l_p1 [" << cs << ".." << ce << "]: " << local_p1); *p1 += local_p1; l->unlock(); return 0; } int zero_worker(uint64_t cs, uint64_t ce, uint64_t s, int64_t m, double * length, uint64_t qubit, xpu::lockable * l, cvector_t * p_data) { cvector_t &data = * p_data; double local_length = 0; uint64_t size = data.size(); if (m) { for (uint64_t i=cs; i<ce; ++i) { if (!__bit_test(i,qubit)) data[i] = 0; local_length += data[i].norm(); //std::norm(data[i]); } } else { for (uint64_t i=cs; i<ce; ++i) { if (__bit_test(i,qubit)) data[i] = 0; local_length += data[i].norm(); //std::norm(data[i]); } } l->lock(); *length += local_length; l->unlock(); return 0; } int renorm_worker(uint64_t cs, uint64_t ce, uint64_t s, double * length, cvector_t * p_data) { cvector_t &data = * p_data; double l = *length; #ifdef __AVX__ // println("avx"); complex_t * vd = p_data->data(); __m256d vl = _mm256_set1_pd(l); for (uint64_t i=cs; i<ce; i+=2) { double * pvd = (double*)&vd[i]; __m256d va = _mm256_load_pd(pvd); __m256d vr = _mm256_div_pd(va, vl); _mm256_store_pd(pvd,vr); } #elif defined(__SSE__) // println("sse"); complex_t * vd = p_data->data(); __m128d vl = _mm_set1_pd(l); for (uint64_t i=cs; i<ce; ++i) { double * pvd 
= (double*)&vd[i]; __m128d va = _mm_load_pd(pvd); __m128d vr = _mm_div_pd(va, vl); _mm_store_pd(pvd,vr); } #else for (uint64_t i=cs; i<ce; ++i) data[i] /= l; #endif // __SSE__ return 0; } /** * measure */ class measure : public gate { private: uint64_t qubit; bool measure_all; bool disable_averaging; public: measure(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging) { } measure() : qubit(0), measure_all(true) { } int64_t apply(qu_register& qreg) { if (measure_all) { // qreg.measure(); for (size_t q=0; q<qreg.size(); q++) qx::measure(q).apply(qreg); return 0; } double f = qreg.rand(); double p = 0; int64_t value; uint64_t size = qreg.size(); uint64_t n = (1 << size); cvector_t& data = qreg.get_data(); double length = 0; if (size > 64) { // #define PARALLEL_MEASUREMENT // #ifdef PARALLEL_MEASUREMENT xpu::lockable * l = new xpu::core::os::mutex(); xpu::task p1_worker_t(p1_worker, (uint64_t)0, n, (uint64_t)1, &p, qubit, l, &data); xpu::parallel_for parallel_p1( (uint64_t)0, n, (uint64_t)1, &p1_worker_t); parallel_p1.run(); if (f<p) value = 1; else value = 0; xpu::task zero_worker_t(zero_worker,(uint64_t)0, n, (uint64_t)1, value, &length, qubit, l, &data); xpu::parallel_for parallel_zero( (uint64_t)0, n, (uint64_t)1, &zero_worker_t); parallel_zero.run(); length = std::sqrt(length); xpu::task renorm_worker_t(renorm_worker, (uint64_t)0, n, (uint64_t)1, &length, &data); xpu::parallel_for parallel_renorm( (uint64_t)0, n, (uint64_t)1, &renorm_worker_t); parallel_renorm.run(); } else { //#else int64_t k, l, m; int64_t j = qubit; double fvalue; std::bitset<MAX_QB_N> b; b.reset(); b.set(qubit); uint64_t bc = b.to_ulong(); while (bc < n) { bc = b.to_ulong(); // p += std::norm(data[bc]); p += data[bc].norm(); b = inc(b); b.set(qubit); bc = b.to_ulong(); } if (f<p) value = 1; else value = 0; if (value) // 1 { // reset all states where the qubit is 0 for (uint64_t i=0; i<(1 << size); ++i) { if 
(!__bit_test(i,qubit)) data[i] = 0; } } else { for (uint64_t i=0; i<(1 << size); ++i) { if (__bit_test(i,qubit)) data[i] = 0; } } for (uint64_t k = 0; k < (1 << size); k++) length += data[k].norm(); //std::norm(data[k]); length = std::sqrt(length); for (uint64_t k = 0; k < (1 << size); k++) data[k] /= length; // #endif // PARALLEL_MEASUREMENT } // println(" [>] measured value : " << value); qreg.set_measurement_prediction(qubit,(value == 1 ? __state_1__ : __state_0__)); qreg.set_measurement(qubit,(value == 1 ? true : false)); //qreg.set_binary(qubit,(value == 1 ? __state_1__ : __state_0__)); if (!disable_averaging) { if (qreg.measurement_averaging_enabled) { if (value == 1) { // println("> exited_states++"); qreg.measurement_averaging[qubit].exited_states++; } else { // println("> ground_states++"); qreg.measurement_averaging[qubit].ground_states++; } } } return value; } void dump() { if (measure_all) println(" [-] measure(register)"); else println(" [-] measure(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; if (!measure_all) r.push_back(qubit); else // this is a dirty hack, itshould be fixed later (unknown qubit number !) 
{ for (int64_t i=0; i<MAX_QB_N; ++i) r.push_back(i); } return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { if (measure_all) return __measure_reg_gate__; else return __measure_gate__; } }; /** * measure_x */ class measure_x : public gate { private: uint64_t qubit; bool measure_all; bool disable_averaging; qx::hadamard hg; qx::measure mg; public: measure_x(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), hg(qubit), mg(qubit), disable_averaging(disable_averaging) { } measure_x() : qubit(0), hg(qubit), mg(qubit), measure_all(true) { } int64_t apply(qu_register& qreg) { int64_t r = 0; if (measure_all) { for (size_t i=0; i<qreg.size(); ++i) qx::hadamard(i).apply(qreg); qreg.measure(); for (size_t i=0; i<qreg.size(); ++i) qx::hadamard(i).apply(qreg); return 0; } hg.apply(qreg); r = mg.apply(qreg); hg.apply(qreg); return r; } void dump() { if (measure_all) println(" [-] measure_x(register)"); else println(" [-] measure_x(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; if (!measure_all) r.push_back(qubit); else // this is a dirty hack, itshould be fixed later (unknown qubit number !) 
{ for (int64_t i=0; i<MAX_QB_N; ++i) r.push_back(i); } return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { if (measure_all) return __measure_x_reg_gate__; else return __measure_x_gate__; } }; /** * measure_y */ class measure_y : public gate { private: uint64_t qubit; bool measure_all; bool disable_averaging; qx::phase_shift sg; qx::pauli_z zg; qx::measure_x mg; /* S(qubit); Z(qubit); bool b = MeasX(qubit, randint); S(qubit); */ public: measure_y(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), sg(qubit), zg(qubit), mg(qubit), disable_averaging(disable_averaging) { } measure_y() : qubit(0), sg(qubit), zg(qubit), mg(), measure_all(true) { } int64_t apply(qu_register& qreg) { int64_t r = 0; if (measure_all) { for (size_t i=0; i<qreg.size(); ++i) { qx::phase_shift(i).apply(qreg); qx::pauli_z(i).apply(qreg); } mg.apply(qreg); for (size_t i=0; i<qreg.size(); ++i) qx::phase_shift(i).apply(qreg); return 0; } sg.apply(qreg); zg.apply(qreg); r = mg.apply(qreg); sg.apply(qreg); return r; } void dump() { if (measure_all) println(" [-] measure_y(register)"); else println(" [-] measure_y(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; if (!measure_all) r.push_back(qubit); else // this is a dirty hack, itshould be fixed later (unknown qubit number !) 
{ for (int64_t i=0; i<MAX_QB_N; ++i) r.push_back(i); } return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { if (measure_all) return __measure_y_reg_gate__; else return __measure_y_gate__; } }; /** * \brief generic binary controlled gate */ class bin_ctrl : public gate { private: // uint64_t bit; std::vector<size_t> bits; gate * g; public: bin_ctrl(size_t bit, gate * g) : g(g) { bits.push_back(bit); } bin_ctrl(std::vector<size_t> bit, gate * g) : g(g) { for (auto b : bit) bits.push_back(b); } int64_t apply(qu_register& qreg) { bool m = true; for (auto b : bits) if (!qreg.test(b)) m = false; if (m) g->apply(qreg); return 0; } gate * get_gate() { return g; } std::vector<size_t> get_bits() { return bits; } void dump() { print(" [-] bin_ctrl: \n bit=" << bits[0] << " -> "); g->dump(); } std::vector<uint64_t> qubits() { return g->qubits(); } std::vector<uint64_t> control_qubits() { return g->control_qubits(); } std::vector<uint64_t> target_qubits() { return g->target_qubits(); } gate_type_t type() { return __bin_ctrl_gate__; } }; #define bin_ctrl_pauli_x(b,q) bin_ctrl(b,new pauli_x(q)) #define bin_ctrl_pauli_y(b,q) bin_ctrl(b,new pauli_y(q)) #define bin_ctrl_pauli_z(b,q) bin_ctrl(b,new pauli_z(q)) /** * \brief classical binary not gate */ class classical_not : public gate { private: uint64_t bit; public: classical_not(uint64_t bit) : bit(bit) { } int64_t apply(qu_register& qreg) { qreg.flip_measurement(bit); return 0; } uint64_t get_bit() { return bit; } void dump() { // println(" [-] classical not gate: \n bit=" << bit); println(" [-] not " << bit); } std::vector<uint64_t> qubits() { return std::vector<uint64_t>(); } std::vector<uint64_t> control_qubits() { return std::vector<uint64_t>(); } std::vector<uint64_t> target_qubits() { return std::vector<uint64_t>(); } gate_type_t type() { return __classical_not_gate__; } }; /** * prepz */ class prepz : public 
gate { private: uint64_t qubit; public: prepz(uint64_t qubit) : qubit(qubit) { } int64_t apply(qu_register& qreg) { measure(qubit,true).apply(qreg); bin_ctrl_pauli_x(qubit,qubit).apply(qreg); // bin_ctrl_pauli_z(qubit,qubit).apply(qreg); qreg.set_measurement(qubit,false); return 0; } void dump() { println(" [-] prepz(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { return __prepz_gate__; } }; /** * prepx */ class prepx : public gate { private: uint64_t qubit; hadamard h; public: prepx(uint64_t qubit) : qubit(qubit), h(qubit) { } int64_t apply(qu_register& qreg) { h.apply(qreg); measure(qubit,true).apply(qreg); h.apply(qreg); bin_ctrl_pauli_z(qubit,qubit).apply(qreg); qreg.set_measurement(qubit,false); return 0; } void dump() { println(" [-] prepx(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { return __prepx_gate__; } }; /** * prepy */ class prepy : public gate { private: uint64_t qubit; prepx px; phase_shift s; public: prepy(uint64_t qubit) : qubit(qubit), px(qubit), s(qubit) { } int64_t apply(qu_register& qreg) { px.apply(qreg); s.apply(qreg); qreg.set_measurement(qubit,false); return 0; } void dump() { println(" [-] prepy(qubit=" << qubit << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; r.push_back(qubit); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { return __prepy_gate__; } }; class lookup_gate_table : public gate { private: std::vector<uint64_t> ctrl_bits; 
std::map<uint64_t,gate *> gates; public: lookup_gate_table(uint64_t b0) { ctrl_bits.push_back(b0); } lookup_gate_table(uint64_t b0, uint64_t b1) { ctrl_bits.push_back(b0); ctrl_bits.push_back(b1); } lookup_gate_table(uint64_t b0, uint64_t b1, uint64_t b2) { ctrl_bits.push_back(b0); ctrl_bits.push_back(b1); ctrl_bits.push_back(b2); } lookup_gate_table(std::vector<uint64_t> ctrl_bits) : ctrl_bits(ctrl_bits) { } void add_gate(uint64_t cond, gate * g) { assert(cond < (1<< ctrl_bits.size())); gates[cond] = g; } int64_t apply(qu_register& qreg) { uint64_t k = 0; for (uint64_t i=0; i<ctrl_bits.size(); i++) { //println(qreg.get_binary(i)); if (qreg.test(ctrl_bits[i])) k = k * 2 + 1; else k *= 2; } // println("[+] lookup table : cond = " << k); std::map<uint64_t,gate*>::iterator it = gates.find(k); if (it != gates.end()) (*it).second->apply(qreg); return 0; } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; // to do std::map<uint64_t,gate *>::iterator ig; for (ig=gates.begin(); ig!=gates.end(); ++ig) { std::vector<uint64_t> ri = ig->second->qubits(); r.insert(r.begin(), ri.begin(), ri.end()); } return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; // to do std::map<uint64_t,gate *>::iterator ig; for (ig=gates.begin(); ig!=gates.end(); ++ig) { std::vector<uint64_t> ri = ig->second->control_qubits(); if (ri.size()) r.insert(r.begin(), ri.begin(), ri.end()); } return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; // to do std::map<uint64_t,gate *>::iterator ig; for (ig=gates.begin(); ig!=gates.end(); ++ig) { std::vector<uint64_t> ri = ig->second->target_qubits(); if (ri.size()) r.insert(r.begin(), ri.begin(), ri.end()); } return r; } void dump() { println(" [-] lookup gate table : "); } gate_type_t type() { return __lookup_table__; } }; /** * \brief display : debug utility * display intermediate quantum states of a * quantum register whithin a circuit. 
*/ class display : public gate { private: bool only_binary; public: display(bool only_binary=false) : only_binary(only_binary) { } int64_t apply(qu_register& qreg) { qreg.dump(only_binary); return 0; } void dump() { println(" [-] display(only_binary=" << only_binary << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; return r; } gate_type_t type() { if (only_binary) return __display_binary__; else return __display__; } }; /** * parallel gates */ class parallel_gates : public gate { public: parallel_gates() { } int64_t apply(qu_register& qreg) { for (uint64_t i=0; i<gates.size(); i++) gates[i]->apply(qreg); return 0; } uint64_t add(gate * g) { gates.push_back(g); return gates.size(); } std::vector<gate *> get_gates() { return gates; } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; for (uint64_t i=0; i<gates.size(); i++) { std::vector<uint64_t> q = gates[i]->qubits(); r.insert(r.end(),q.begin(),q.end()); } return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; for (uint64_t i=0; i<gates.size(); i++) { std::vector<uint64_t> q = gates[i]->control_qubits(); r.insert(r.end(),q.begin(),q.end()); } return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; for (uint64_t i=0; i<gates.size(); i++) { std::vector<uint64_t> q = gates[i]->target_qubits(); r.insert(r.end(),q.begin(),q.end()); } return r; } void dump() { println(" [-] parallel_gates (" << gates.size() << " gates) : "); for (uint64_t i=0; i<gates.size(); i++) gates[i]->dump(); } gate_type_t type() { return __parallel_gate__; } private: std::vector<gate *> gates; // list of the parallel gates }; /** * prepare the qubits into an arbitrary quantum state */ class prepare : public gate { private: quantum_state_t * state; public: prepare(quantum_state_t * state) : state(state) { } int64_t 
apply(qu_register& qreg) { qreg.reset(); cvector_t& q = qreg.get_data(); double norm = 0; for (quantum_state_t::iterator i=state->begin(); i != state->end(); ++i) { basis_state_t bs = (*i).first; complex_t c = (*i).second; // println("bs=" << bs << ", a=" << c); q[bs] = c; norm += c.norm(); //std::norm(c); } if (std::fabs(norm-1) > QUBIT_ERROR_THRESHOLD) { println("[!] warning : the loaded quantum state is not normalized (norm = " << norm << ") !"); println("[!] renormalizing the quantum state..."); qreg.normalize(); println("[!] quantum state renormalized successfully."); } for (size_t qi=0; qi<qreg.size(); ++qi) { qreg.set_measurement_prediction(qi,__state_unknown__); //qreg.set_binary(qi,__state_unknown__); } return 0; } void dump() { println(" [-] prepare (quantum_state=" << state << ")"); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; // this is a dirty hack, itshould be fixed later (unknown qubit number !) for (int64_t i=0; i<MAX_QB_N; ++i) r.push_back(i); return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { return qubits(); } gate_type_t type() { return __prepare_gate__; } }; /** * \brief print : debug utility * print arbitrary string */ class print_str : public gate { private: std::string str; public: print_str(std::string& s) : str(s) { } int64_t apply(qu_register& qreg) { println(str); return 0; } void dump() { println(" print " << str << "\""); } std::vector<uint64_t> qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> control_qubits() { std::vector<uint64_t> r; return r; } std::vector<uint64_t> target_qubits() { std::vector<uint64_t> r; return r; } gate_type_t type() { return __print_str__; } }; } #endif // QX_GATE_H
/* ===== concatenated file boundary: GB_binop__bset_int64.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_08__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_04__bset_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bset_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int64) // C=scalar+B GB (_bind1st__bset_int64) // C=scalar+B' GB (_bind1st_tran__bset_int64) // C=A+scalar GB (_bind2nd__bset_int64) // C=A'+scalar GB (_bind2nd_tran__bset_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITSET (x, y, int64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bset_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict 
Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bset_int64) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bset_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITSET (x, bij, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITSET (aij, y, int64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bset_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bset_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
main.c
# include <stdio.h>
# include <stdlib.h>
# include <pthread.h>
# include <omp.h>

#define NUM_THREADS 4
/* 4e9 does not fit in a 32-bit int (INT_MAX ~ 2.1e9): keep the constant as a
 * long long literal so all derived bounds arithmetic is done in 64 bits. */
#define VALUE 4000000000LL
#define PAD 8

double global = 0.0;

/*
 * Approximates pi with the Leibniz series pi/4 = sum_i (-1)^i / (2i+1),
 * splitting the VALUE terms evenly across NUM_THREADS OpenMP threads.
 *
 * Fixes over the previous version:
 *  - ini/end/i/denom were plain int: for thread IDs >= 2 the range start
 *    (2e9, 3e9) and the denominator 2*i+1 overflowed signed int (UB).
 *    All index arithmetic is now long long.
 *  - "global += local" was an unsynchronized read-modify-write executed by
 *    every thread (a data race); it is now protected by "omp atomic".
 *  - The dead sum[NUM_THREADS][PAD] scratch array (its j/k indices never
 *    advanced, so it only produced dead stores) has been removed, along
 *    with the commented-out pthread variant of the same computation.
 */
int main(void)
{
#pragma omp parallel num_threads(NUM_THREADS)
   {
      int ID = omp_get_thread_num();
      long long tam = VALUE / NUM_THREADS;        /* terms per thread          */
      long long ini = (long long)ID * tam;        /* first term (inclusive)    */
      long long end = ((long long)ID + 1) * tam;  /* one past the last term    */
      double local = 0.0;                         /* thread-private partial sum */

      long long i;
      for (i = ini; i < end; i++) {
         long long denom = 2*i + 1;               /* needs 64 bits near i ~ 2e9 */
         local += (i % 2 ? -1.0 : 1.0) * (1.0 / denom);
      }

      /* combine partial sums without racing on the shared accumulator */
#pragma omp atomic
      global += local;
   }

   printf("Pi %f\n", global * 4);
   exit(0);
}
exemplo_for.c
#include "exemplos.h"

/*
 * For-worksharing example: inside one parallel region, each thread reports
 * the first iteration the "omp for" schedule hands it, then how many
 * iterations of the 1000-iteration loop it ended up executing.
 */
int main(int argc, char **argv)
{
   int i;
   int thread_id;
   int nloops;

#pragma omp parallel private(thread_id, nloops)
   {
      /* per-thread bookkeeping: id and personal iteration counter */
      thread_id = omp_get_thread_num();
      nloops = 0;

      /* i is the workshared loop variable, implicitly private here */
#pragma omp for
      for (i = 0; i < 1000; ++i) {
         if (nloops == 0) {
            printf("Thread %d started with i=%d\n", thread_id, i);
         }
         ++nloops;
      }

      thread_id = omp_get_thread_num();
      printf("Thread %d performed %d iterations of the loop.\n",
             thread_id, nloops);
   }

   return 0;
}
cg.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG This benchmark is an OpenMP C version of the NPB CG code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: M. Yarrow C. Kuszmaul OpenMP C version: S. Satoh 3.0 structure translation: F. Conti --------------------------------------------------------------------*/ /* c--------------------------------------------------------------------- c Note: please observe that in the routine conj_grad three c implementations of the sparse matrix-vector multiply have c been supplied. The default matrix-vector multiply is not c loop unrolled. The alternate implementations are unrolled c to a depth of 2 and unrolled to a depth of 8. Please c experiment with these to find the fastest for your particular c architecture. If reporting timing results, any of these three may c be used without penalty. 
c--------------------------------------------------------------------- */ #include "../common/npb-C.h" #include "npbparams.h" #define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2) /* global variables */ /* common /partit_size/ */ #include <omp.h> static int naa; static int nzz; static int firstrow; static int lastrow; static int firstcol; static int lastcol; /* common /main_int_mem/ */ /* colidx[1:NZ] */ static int colidx[2198001]; /* rowstr[1:NA+1] */ static int rowstr[14002]; /* iv[1:2*NA+1] */ static int iv[28002]; /* arow[1:NZ] */ static int arow[2198001]; /* acol[1:NZ] */ static int acol[2198001]; /* common /main_flt_mem/ */ /* v[1:NA+1] */ static double v[14002]; /* aelt[1:NZ] */ static double aelt[2198001]; /* a[1:NZ] */ static double a[2198001]; /* x[1:NA+2] */ static double x[14003]; /* z[1:NA+2] */ static double z[14003]; /* p[1:NA+2] */ static double p[14003]; /* q[1:NA+2] */ static double q[14003]; /* r[1:NA+2] */ static double r[14003]; //static double w[NA+2+1]; /* w[1:NA+2] */ /* common /urando/ */ static double amult; static double tran; /* function declarations */ static //double w[], void conj_grad(int colidx[],int rowstr[],double x[],double z[],double a[],double p[],double q[],double r[],double *rnorm); static void makea(int n,int nz,double a[],int colidx[],int rowstr[],int nonzer,int firstrow,int lastrow,int firstcol,int lastcol,double rcond,int arow[],int acol[],double aelt[],double v[],int iv[],double shift); static void sparse(double a[],int colidx[],int rowstr[],int n,int arow[],int acol[],double aelt[],int firstrow,int lastrow,double x[],boolean mark[],int nzloc[],int nnza); static void sprnvc(int n,int nz,double v[],int iv[],int nzloc[],int mark[]); static int icnvrt(double x,int ipwr2); static void vecset(int n,double v[],int iv[],int *nzv,int i,double val); /*-------------------------------------------------------------------- program cg --------------------------------------------------------------------*/ int main(int argc,char **argv) { 
int i; int j; int k; int it; int nthreads = 1; double zeta; double rnorm; double norm_temp11; double norm_temp12; double t; double mflops; char class; boolean verified; double zeta_verify_value; double epsilon; firstrow = 1; lastrow = 14000; firstcol = 1; lastcol = 14000; if (14000 == 1400 && 11 == 7 && 15 == 15 && 20.0 == 10.0) { class = 'S'; zeta_verify_value = 8.5971775078648; } else if (14000 == 7000 && 11 == 8 && 15 == 15 && 20.0 == 12.0) { class = 'W'; zeta_verify_value = 10.362595087124; } else if (14000 == 14000 && 11 == 11 && 15 == 15 && 20.0 == 20.0) { class = 'A'; zeta_verify_value = 17.130235054029; } else if (14000 == 75000 && 11 == 13 && 15 == 75 && 20.0 == 60.0) { class = 'B'; zeta_verify_value = 22.712745482631; } else if (14000 == 150000 && 11 == 15 && 15 == 75 && 20.0 == 110.0) { class = 'C'; zeta_verify_value = 28.973605592845; } else { class = 'U'; } printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version - CG Benchmark\n"); printf(" Size: %10d\n",14000); printf(" Iterations: %5d\n",15); naa = 14000; nzz = 14000 * (11 + 1) * (11 + 1) + 14000 * (11 + 2); /*-------------------------------------------------------------------- c Initialize random number generator c-------------------------------------------------------------------*/ tran = 314159265.0; amult = 1220703125.0; zeta = randlc(&tran,amult); /*-------------------------------------------------------------------- c c-------------------------------------------------------------------*/ makea(naa,nzz,a,colidx,rowstr,11,firstrow,lastrow,firstcol,lastcol,1.0e-1,arow,acol,aelt,v,iv,20.0); /*--------------------------------------------------------------------- c Note: as a result of the above call to makea: c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1 c values of colidx which are col indexes go from firstcol --> lastcol c So: c Shift the col index vals from actual (firstcol --> lastcol ) c to local, i.e., (1 --> lastcol-firstcol+1) 
c---------------------------------------------------------------------*/ { for (j = 1; j <= lastrow - firstrow + 1; j += 1) { #pragma omp parallel for private (k) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { colidx[k] = colidx[k] - firstcol + 1; } } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ #pragma omp parallel for private (i) for (i = 1; i <= 14001; i += 1) { x[i] = 1.0; } #pragma omp parallel for private (j) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { q[j] = 0.0; z[j] = 0.0; r[j] = 0.0; p[j] = 0.0; } // end omp parallel } zeta = 0.0; /*------------------------------------------------------------------- c----> c Do one iteration untimed to init all code and data page tables c----> (then reinit, start timing, to niter its) c-------------------------------------------------------------------*/ for (it = 1; it <= 1; it += 1) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ /* w,*/ conj_grad(colidx,rowstr,x,z,a,p,q,r,&rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ norm_temp11 = 0.0; norm_temp12 = 0.0; #pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } norm_temp12 = 1.0 / sqrt(norm_temp12); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) firstprivate (norm_temp12) for (j = 1; j <= 
lastcol - firstcol + 1; j += 1) { x[j] = norm_temp12 * z[j]; } /* end of do one iteration untimed */ } /*-------------------------------------------------------------------- c set starting vector to (1, 1, .... 1) c-------------------------------------------------------------------*/ #pragma omp parallel for private (i) for (i = 1; i <= 14001; i += 1) { x[i] = 1.0; } zeta = 0.0; timer_clear(1); timer_start(1); /*-------------------------------------------------------------------- c----> c Main Iteration for inverse power method c----> c-------------------------------------------------------------------*/ for (it = 1; it <= 15; it += 1) { /*-------------------------------------------------------------------- c The call to the conjugate gradient routine: c-------------------------------------------------------------------*/ /*, w*/ conj_grad(colidx,rowstr,x,z,a,p,q,r,&rnorm); /*-------------------------------------------------------------------- c zeta = shift + 1/(x.z) c So, first: (x.z) c Also, find norm of z c So, first: (z.z) c-------------------------------------------------------------------*/ norm_temp11 = 0.0; norm_temp12 = 0.0; #pragma omp parallel for private (j) reduction (+:norm_temp11,norm_temp12) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { norm_temp11 = norm_temp11 + x[j] * z[j]; norm_temp12 = norm_temp12 + z[j] * z[j]; } norm_temp12 = 1.0 / sqrt(norm_temp12); zeta = 20.0 + 1.0 / norm_temp11; if (it == 1) { printf(" iteration ||r|| zeta\n"); } printf(" %5d %20.14e%20.13e\n",it,rnorm,zeta); /*-------------------------------------------------------------------- c Normalize z to obtain x c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) firstprivate (norm_temp12) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { x[j] = norm_temp12 * z[j]; } /* end of main iter inv pow meth */ } { //#if defined(_OPENMP) // nthreads = omp_get_num_threads(); //#endif /* _OPENMP */ /* end parallel */ } 
timer_stop(1); /*-------------------------------------------------------------------- c End of timed section c-------------------------------------------------------------------*/ t = timer_read(1); printf(" Benchmark completed\n"); epsilon = 1.0e-10; //epsilon = 1.0e-2; if (class != 'U') { if (fabs(zeta - zeta_verify_value) <= epsilon) { verified = 1; printf(" VERIFICATION SUCCESSFUL\n"); printf(" Zeta is %20.12e\n",zeta); printf(" Error is %20.12e\n",zeta - zeta_verify_value); } else { verified = 0; printf(" VERIFICATION FAILED\n"); printf(" Zeta %20.12e\n",zeta); printf(" The correct zeta is %20.12e\n",zeta_verify_value); } } else { verified = 0; printf(" Problem size unknown\n"); printf(" NO VERIFICATION PERFORMED\n"); } if (t != 0.0) { mflops = 2.0 * 15 * 14000 * (3.0 + (11 * (11 + 1)) + 25.0 * (5.0 + (11 * (11 + 1))) + 3.0) / t / 1000000.0; } else { mflops = 0.0; } c_print_results("CG",class,14000,0,0,15,nthreads,t,mflops," floating point",verified,"3.0 structured","14 Jan 2020","(none)","(none)","-lm","(none)","(none)","(none)","randdp"); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void conj_grad( /* colidx[1:nzz] */ int colidx[], /* rowstr[1:naa+1] */ int rowstr[], /* x[*] */ double x[], /* z[*] */ double z[], /* a[1:nzz] */ double a[], /* p[*] */ double p[], /* q[*] */ double q[], /* r[*] */ double r[], //double w[], /* w[*] */ double *rnorm) /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*--------------------------------------------------------------------- c Floaging point arrays here are named as in NPB1 spec discussion of c CG algorithm c---------------------------------------------------------------------*/ { static int callcount = 0; double d; double sum; double rho; double rho0; double alpha; double beta; int i; int j; int k; int cgit; int cgitmax = 
25; rho = 0.0; /*-------------------------------------------------------------------- c Initialize the CG algorithm: c-------------------------------------------------------------------*/ { #pragma omp parallel for private (j) firstprivate (naa) for (j = 1; j <= naa + 1; j += 1) { q[j] = 0.0; z[j] = 0.0; r[j] = x[j]; p[j] = r[j]; //w[j] = 0.0; } /*-------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) reduction (+:rho) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { rho = rho + r[j] * r[j]; } /* end omp parallel */ } /*-------------------------------------------------------------------- c----> c The conj grad iteration loop c----> c-------------------------------------------------------------------*/ for (cgit = 1; cgit <= cgitmax; cgit += 1) { rho0 = rho; d = 0.0; rho = 0.0; { /*-------------------------------------------------------------------- c q = A.p c The partition submatrix-vector multiply: use workspace w c--------------------------------------------------------------------- C C NOTE: this version of the multiply is actually (slightly: maybe %5) C faster on the sp2 on 16 nodes than is the unrolled-by-2 version C below. On the Cray t3d, the reverse is true, i.e., the C unrolled-by-two version is some 10% faster. C The unrolled-by-8 version below is significantly faster C on the Cray t3d - overall speed of code is 1.5 times faster. 
*/ /* rolled version */ #pragma omp parallel for private (sum,j,k) for (j = 1; j <= lastrow - firstrow + 1; j += 1) { sum = 0.0; #pragma omp parallel for private (k) reduction (+:sum) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { sum = sum + a[k] * p[colidx[k]]; } //w[j] = sum; q[j] = sum; } /* unrolled-by-two version for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; double sum1, sum2; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 2; sum1 = 0.0; sum2 = 0.0; if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]]; for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) { sum1 = sum1 + a[k] * p[colidx[k]]; sum2 = sum2 + a[k+1] * p[colidx[k+1]]; } w[j] = sum1 + sum2; } */ /* unrolled-by-8 version for (j = 1; j <= lastrow-firstrow+1; j++) { int iresidue; i = rowstr[j]; iresidue = (rowstr[j+1]-i) % 8; sum = 0.0; for (k = i; k <= i+iresidue-1; k++) { sum = sum + a[k] * p[colidx[k]]; } for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) { sum = sum + a[k ] * p[colidx[k ]] + a[k+1] * p[colidx[k+1]] + a[k+2] * p[colidx[k+2]] + a[k+3] * p[colidx[k+3]] + a[k+4] * p[colidx[k+4]] + a[k+5] * p[colidx[k+5]] + a[k+6] * p[colidx[k+6]] + a[k+7] * p[colidx[k+7]]; } w[j] = sum; } */ /* for (j = 1; j <= lastcol-firstcol+1; j++) { q[j] = w[j]; } */ /*-------------------------------------------------------------------- c Clear w for reuse... 
c-------------------------------------------------------------------*/ /* for (j = 1; j <= lastcol-firstcol+1; j++) { w[j] = 0.0; } */ /*-------------------------------------------------------------------- c Obtain p.q c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) reduction (+:d) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { d = d + p[j] * q[j]; } /*-------------------------------------------------------------------- c Obtain alpha = rho / (p.q) c-------------------------------------------------------------------*/ alpha = rho0 / d; /*-------------------------------------------------------------------- c Save a temporary of rho c-------------------------------------------------------------------*/ /* rho0 = rho;*/ /*--------------------------------------------------------------------- c Obtain z = z + alpha*p c and r = r - alpha*q c---------------------------------------------------------------------*/ #pragma omp parallel for private (j) reduction (+:rho) firstprivate (alpha) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { z[j] = z[j] + alpha * p[j]; r[j] = r[j] - alpha * q[j]; // } /*--------------------------------------------------------------------- c rho = r.r c Now, obtain the norm of r: First, sum squares of r elements locally... 
c---------------------------------------------------------------------*/ /* for (j = 1; j <= lastcol-firstcol+1; j++) {*/ rho = rho + r[j] * r[j]; } /*-------------------------------------------------------------------- c Obtain beta: c-------------------------------------------------------------------*/ beta = rho / rho0; /*-------------------------------------------------------------------- c p = r + beta*p c-------------------------------------------------------------------*/ #pragma omp parallel for private (j) firstprivate (beta) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { p[j] = r[j] + beta * p[j]; } callcount++; /* end omp parallel */ } /* end of do cgit=1,cgitmax */ } /*--------------------------------------------------------------------- c Compute residual norm explicitly: ||r|| = ||x - A.z|| c First, form A.z c The partition submatrix-vector multiply c---------------------------------------------------------------------*/ sum = 0.0; { #pragma omp parallel for private (d,j,k) firstprivate (firstrow,lastrow) for (j = 1; j <= lastrow - firstrow + 1; j += 1) { d = 0.0; #pragma omp parallel for private (k) reduction (+:d) for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) { d = d + a[k] * z[colidx[k]]; } r[j] = d; } /*-------------------------------------------------------------------- c At this point, r contains A.z c-------------------------------------------------------------------*/ #pragma omp parallel for private (d,j) reduction (+:sum) firstprivate (firstcol,lastcol) for (j = 1; j <= lastcol - firstcol + 1; j += 1) { d = x[j] - r[j]; sum = sum + d * d; } //end omp parallel } *rnorm = sqrt(sum); } /*--------------------------------------------------------------------- c generate the test problem for benchmark 6 c makea generates a sparse matrix with a c prescribed sparsity distribution c c parameter type usage c c input c c n i number of cols/rows of matrix c nz i nonzeros as declared array size c rcond r*8 condition number c shift r*8 main 
diagonal shift
c
c  output
c
c        a             r*8     array for nonzeros
c        colidx        i       col indices
c        rowstr        i       row pointers
c
c  workspace
c
c       iv, arow, acol i
c       v, aelt        r*8
c---------------------------------------------------------------------*/
/* Builds the sparse test matrix as a list of (arow, acol, aelt) triples
   (with duplicate positions), then calls sparse() to assemble the final
   CSR structure (a, colidx, rowstr).  All arrays are 1-based, Fortran
   style, as in the original NPB Fortran source. */
static void makea(int n,int nz,
/* a[1:nz] */
double a[],
/* colidx[1:nz] */
int colidx[],
/* rowstr[1:n+1] */
int rowstr[],int nonzer,int firstrow,int lastrow,int firstcol,int lastcol,double rcond,
/* arow[1:nz] */
int arow[],
/* acol[1:nz] */
int acol[],
/* aelt[1:nz] */
double aelt[],
/* v[1:n+1] */
double v[],
/* iv[1:2*n+1] */
int iv[],double shift)
{
  int i;
  int nnza;     /* running count of generated triples; bounded by nz */
  int iouter;
  int ivelt;
  int ivelt1;
  int irow;
  int nzv;
/*--------------------------------------------------------------------
c      nonzer is approximately  (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
  double size;
  double ratio;
  double scale;
  int jcol;
  size = 1.0;
  /* size decays geometrically from 1.0 toward rcond over the n outer
     iterations: ratio^n == rcond by construction. */
  ratio = pow(rcond,1.0 / ((double )n));
  nnza = 0;
/*---------------------------------------------------------------------
c  Initialize colidx(n+1 .. 2n) to zero.
c  Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/

#pragma omp parallel for private (i)
  for (i = 1; i <= n; i += 1) {
    colidx[n + i] = 0;
  }
  for (iouter = 1; iouter <= n; iouter += 1) {
    nzv = nonzer;
    /* random sparse vector; &colidx[n] is sprnvc's zeroed mark[] scratch */
    sprnvc(n,nzv,v,iv,&colidx[0],&colidx[n]);
    /* force element iouter to 0.5 (may grow nzv by one) */
    vecset(n,v,iv,&nzv,iouter,0.5);
    /* emit the outer product v*v^T, restricted to this block's rows/cols */
    for (ivelt = 1; ivelt <= nzv; ivelt += 1) {
      jcol = iv[ivelt];
      if (jcol >= firstcol && jcol <= lastcol) {
        scale = size * v[ivelt];
        for (ivelt1 = 1; ivelt1 <= nzv; ivelt1 += 1) {
          irow = iv[ivelt1];
          if (irow >= firstrow && irow <= lastrow) {
            nnza = nnza + 1;
            if (nnza > nz) {
              printf("Space for matrix elements exceeded in makea\n");
              printf("nnza, nzmax = %d, %d\n",nnza,nz);
              printf("iouter = %d\n",iouter);
              exit(1);
            }
            acol[nnza] = jcol;
            arow[nnza] = irow;
            aelt[nnza] = v[ivelt1] * scale;
          }
        }
      }
    }
    size = size * ratio;
  }
/*---------------------------------------------------------------------
c       ... add the identity * rcond to the generated matrix to bound
c           the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
  for (i = firstrow; i <= lastrow; i += 1) {
    if (i >= firstcol && i <= lastcol) {
      iouter = n + i;
      nnza = nnza + 1;
      if (nnza > nz) {
        printf("Space for matrix elements exceeded in makea\n");
        printf("nnza, nzmax = %d, %d\n",nnza,nz);
        printf("iouter = %d\n",iouter);
        exit(1);
      }
      acol[nnza] = i;
      arow[nnza] = i;
      /* diagonal entry: rcond - shift (shift is applied to the spectrum) */
      aelt[nnza] = rcond - shift;
    }
  }
/*---------------------------------------------------------------------
c       ... make the sparse matrix from list of elements with duplicates
c           (v and iv are used as workspace)
c---------------------------------------------------------------------*/
  sparse(a,colidx,rowstr,n,arow,acol,aelt,firstrow,lastrow,v,&iv[0],&iv[n],nnza);
}
/*---------------------------------------------------
c       generate a sparse matrix from a list of
c       [col, row, element] tri
c---------------------------------------------------*/
/* Assembles CSR arrays (a, colidx, rowstr) from the nnza triples
   (arow, acol, aelt), summing duplicates.  x and mark are length-n
   workspaces; nzloc records which x[] slots each row touched so only
   those are reset (the performance fix credited above). */
static void sparse(
/* a[1:*] */
double a[],
/* colidx[1:*] */
int colidx[],
/* rowstr[1:*] */
int rowstr[],int n,
/* arow[1:*] */
int arow[],
/* acol[1:*] */
int acol[],
/* aelt[1:*] */
double aelt[],int firstrow,int lastrow,
/* x[1:n] */
double x[],
/* mark[1:n] */
boolean mark[],
/* nzloc[1:n] */
int nzloc[],int nnza)
/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
  int nrows;
  int i;
  int j;
  int jajp1;      /* start offset of row j before rowstr is rewritten */
  int nza;
  int k;
  int nzrow;
  double xi;
/*--------------------------------------------------------------------
c    how many rows of result
c-------------------------------------------------------------------*/
  nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c     ...count the number of triples in each row
c-------------------------------------------------------------------*/

#pragma omp parallel for private (j)
  for (j = 1; j <= n; j += 1) {
    rowstr[j] = 0;
    mark[j] = 0;
  }
  rowstr[n + 1] = 0;
  /* histogram, shifted by one slot so the prefix sum below turns the
     counts into row start offsets */
  for (nza = 1; nza <= nnza; nza += 1) {
    j = arow[nza] - firstrow + 1 + 1;
    rowstr[j] = rowstr[j] + 1;
  }
  rowstr[1] = 1;
  for (j = 2; j <= nrows + 1; j += 1) {
    rowstr[j] = rowstr[j] + rowstr[j - 1];
  }
/*---------------------------------------------------------------------
c     ... rowstr(j) now is the location of the first nonzero
c           of row j of a
c---------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c     ... preload data pages
c---------------------------------------------------------------------*/
  /* NOTE(review): this loop starts at j = 0 and therefore reads
     rowstr[0], which this function never initializes (arrays are
     1-based everywhere else).  It only zeroes pages for touch-first
     purposes, but confirm rowstr[0] is set by the caller. */
  for (j = 0; j <= nrows - 1; j += 1) {

#pragma omp parallel for private (k)
    for (k = rowstr[j]; k <= rowstr[j + 1] - 1; k += 1) {
      a[k] = 0.0;
    }
  }
/*--------------------------------------------------------------------
c     ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
  /* each insertion advances rowstr[j], so afterwards rowstr[j] points
     one past row j's last element (i.e. at row j+1's first element) */
  for (nza = 1; nza <= nnza; nza += 1) {
    j = arow[nza] - firstrow + 1;
    k = rowstr[j];
    a[k] = aelt[nza];
    colidx[k] = acol[nza];
    rowstr[j] = rowstr[j] + 1;
  }
/*--------------------------------------------------------------------
c       ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
  /* shift the pointers back down by one row to restore rowstr[j] ==
     start of row j */
  for (j = nrows; j >= 1; j += -1) {
    rowstr[j + 1] = rowstr[j];
  }
  rowstr[1] = 1;
/*--------------------------------------------------------------------
c     ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
  nza = 0;

#pragma omp parallel for private (i) firstprivate (n)
  for (i = 1; i <= n; i += 1) {
    x[i] = 0.0;
    mark[i] = 0;
  }
  jajp1 = rowstr[1];
  for (j = 1; j <= nrows; j += 1) {
    nzrow = 0;
/*--------------------------------------------------------------------
c          ...loop over the jth row of a
c-------------------------------------------------------------------*/
    /* scatter-add duplicates of row j into the dense workspace x */
    for (k = jajp1; k <= rowstr[j + 1] - 1; k += 1) {
      i = colidx[k];
      x[i] = x[i] + a[k];
      if (mark[i] == 0 && x[i] != 0.0) {
        mark[i] = 1;
        nzrow = nzrow + 1;
        nzloc[nzrow] = i;
      }
    }
/*--------------------------------------------------------------------
c          ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
    /* gather the touched positions back out, compacting in place over
       the duplicate-laden input (nza <= k always holds here) */
    for (k = 1; k <= nzrow; k += 1) {
      i = nzloc[k];
      mark[i] = 0;
      xi = x[i];
      x[i] = 0.0;
      if (xi != 0.0) {
        nza = nza + 1;
        a[nza] = xi;
        colidx[nza] = i;
      }
    }
    jajp1 = rowstr[j + 1];
    rowstr[j + 1] = nza + rowstr[1];
  }
}
/*---------------------------------------------------------------------
c       generate a sparse n-vector (v, iv)
c       having nzv nonzeros
c
c       mark(i) is set to 1 if position i is nonzero.
c       mark is all zero on entry and is reset to all zero before exit
c       this corrects a performance bug found by John G.
Lewis, caused by
c       reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
/* Generates a sparse n-vector (v, iv) with nz nonzeros at random
   1-based positions.  mark[] is scratch used to reject duplicate
   positions: it is all zero on entry and restored to all zero on exit
   (only the touched entries, recorded in nzloc, are cleared).
   NOTE(review): relies on the file-level PRNG state (tran, amult) via
   randlc(); values are presumably in (0,1) — confirmed by icnvrt usage. */
static void sprnvc(int n,int nz,
/* v[1:*] */
double v[],
/* iv[1:*] */
int iv[],
/* nzloc[1:n] */
int nzloc[],
/* mark[1:n] */
int mark[])
{
  int nn1;
  int nzrow;     /* how many mark[] entries were set (to clear on exit) */
  int nzv;       /* nonzeros generated so far */
  int ii;
  int i;
  double vecelt;
  double vecloc;
  nzv = 0;
  nzrow = 0;
  nn1 = 1;
  do {
    nn1 = 2 * nn1;
  }while (nn1 < n);
/*--------------------------------------------------------------------
c    nn1 is the smallest power of two not less than n
c-------------------------------------------------------------------*/
  while(nzv < nz){
    vecelt = randlc(&tran,amult);
/*--------------------------------------------------------------------
c   generate an integer between 1 and n in a portable manner
c-------------------------------------------------------------------*/
    vecloc = randlc(&tran,amult);
    i = icnvrt(vecloc,nn1) + 1;
    /* nn1 may exceed n, so reject out-of-range draws (keeps the
       distribution over 1..n uniform and portable) */
    if (i > n)
      continue;
/*--------------------------------------------------------------------
c  was this integer generated already?
c-------------------------------------------------------------------*/
    if (mark[i] == 0) {
      mark[i] = 1;
      nzrow = nzrow + 1;
      nzloc[nzrow] = i;
      nzv = nzv + 1;
      v[nzv] = vecelt;
      iv[nzv] = i;
    }
  }
  /* restore mark[] to all zeros for the next call (only touched slots) */
  for (ii = 1; ii <= nzrow; ii += 1) {
    i = nzloc[ii];
    mark[i] = 0;
  }
}
/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
/* Maps x in (0,1) to an integer in [0, ipwr2). */
static int icnvrt(double x,int ipwr2)
{
  return (int )(ipwr2 * x);
}
/*--------------------------------------------------------------------
c       set ith element of sparse vector (v, iv) with
c       nzv nonzeros to val
c-------------------------------------------------------------------*/
/* If position i already appears in (v, iv), overwrite its value with
   val; otherwise append (val, i), incrementing *nzv. */
static void vecset(int n,
/* v[1:*] */
double v[],
/* iv[1:*] */
int iv[],int *nzv,int i,double val)
{
  int k;
  boolean set;
  set = 0;
  /* BUGFIX(review): the original "#pragma omp parallel for private (k)"
     here was a data race — multiple threads could write the shared flag
     `set` without synchronization, which is undefined behavior in C11.
     The loop runs over at most *nzv (tiny, ~nonzer) elements, so it is
     executed serially: same result, no race, no parallel overhead. */
  for (k = 1; k <= *nzv; k += 1) {
    if (iv[k] == i) {
      v[k] = val;
      set = 1;
    }
  }
  if (set == 0) {
    *nzv = *nzv + 1;
    v[ *nzv] = val;
    iv[ *nzv] = i;
  }
}
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) { for (t4=max(max(max(0,ceild(3*t1-63,64)),ceild(24*t2-Nz-252,256)),ceild(8*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(12*t1+Nx+21,256)),floord(24*t2+Nx+20,256)),floord(8*t3+Nx+4,256)),floord(24*t1-24*t2+Nz+Nx+19,256));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),256*t4+254),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 
1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
atomic.c
#include <stdio.h> #define N 100 int main() { int fail = 0; int error = 0; int a = 0; int ii = 0; #pragma omp target map(tofrom:a) { #pragma omp parallel for for(ii = 0; ii < N; ++ii) #pragma omp atomic a++; } // Check result int result = a; int expect = N; if (result != expect) { printf("update (implicit) a %d != %d (error %d)\n", result, expect, ++error); fail = 1; } if(!fail) printf("successful\n"); else fail = 0; printf("done with %d errors\n", error); return error; }
GB_binop__pair_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_int32) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A.*B function (eWiseMult): GB ((none)) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pair_int32) // C+=b function (dense accum): GB (_Cdense_accumb__pair_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_int32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int32_t // A type: int32_t // A pattern? 1 // B type: int32_t // B pattern? 
1 // BinaryOp: cij = 1 #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // true if values of B are not used #define GB_B_IS_PATTERN \ 1 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_INT32 || GxB_NO_PAIR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pair_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict 
Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pair_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = 1 ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = 1 ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
bdf2_turbulent_scheme.h
// |  /           |
// ' /   __| _` | __|  _ \   __|
// . \  |   (   | |   (   |\__ `
// _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Jordi Cotela
//

#if !defined(KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED )
#define KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED

// System includes
#include <string>
#include <iostream>

// External includes

// Project includes
#include "solving_strategies/schemes/scheme.h"
#include "includes/define.h"
// #include "includes/serializer.h"
#include "includes/dof.h"
#include "processes/process.h"
#include "containers/pointer_vector_set.h"
#include "utilities/coordinate_transformation_utilities.h"

// Application includes
#include "fluid_dynamics_application_variables.h"

namespace Kratos
{
///@addtogroup FluidDynamicsApplication
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@}
///@name Kratos Classes
///@{

/// A scheme for BDF2 time integration.
/**
 * Variable-step BDF2 time scheme for the fluid dynamics application.
 * Optionally drives a turbulence model (a Kratos::Process executed each
 * non-linear iteration), applies slip boundary conditions through a
 * coordinate rotation tool, and computes OSS projections when OSS_SWITCH
 * is active. Requires a nodal buffer of size >= 3 (values at n+1, n, n-1).
 */
template<class TSparseSpace,class TDenseSpace>
class BDF2TurbulentScheme : public Scheme<TSparseSpace, TDenseSpace>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of BDF2TurbulentScheme
    KRATOS_CLASS_POINTER_DEFINITION(BDF2TurbulentScheme);

    typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
    typedef typename TSparseSpace::DataType TDataType;
    typedef typename TSparseSpace::MatrixType TSystemMatrixType;
    typedef typename TSparseSpace::VectorType TSystemVectorType;

    typedef typename TDenseSpace::MatrixType LocalSystemMatrixType;
    typedef typename TDenseSpace::VectorType LocalSystemVectorType;

    typedef Dof<TDataType> TDofType;
    typedef typename BaseType::DofsArrayType DofsArrayType;

    // Tool used to rotate local contributions into a normal/tangential frame
    // on SLIP-flagged nodes.
    typedef CoordinateTransformationUtils<LocalSystemMatrixType, LocalSystemVectorType, double> RotationToolType;
    typedef typename RotationToolType::UniquePointer RotationToolPointerType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    // mrPeriodicIdVar is bound to the static dummy variable (Key() == 0),
    // which disables the periodic-condition correction in LumpedProjection.
    BDF2TurbulentScheme()
        : Scheme<TSparseSpace, TDenseSpace>()
        , mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
    {}

    /// Constructor to use the formulation combined with a turbulence model.
    /**
     * The turbulence model is assumed to be implemented as a Kratos::Process.
     * The model's Execute() method will be called at the start of each
     * non-linear iteration.
     * @param pTurbulenceModel pointer to the turbulence model
     */
    BDF2TurbulentScheme(Process::Pointer pTurbulenceModel)
        : Scheme<TSparseSpace, TDenseSpace>()
        , mpTurbulenceModel(pTurbulenceModel)
        , mrPeriodicIdVar(Kratos::Variable<int>::StaticObject())
    {}

    /// Constructor for periodic boundary conditions.
    /**
     * @param rPeriodicVar the variable used to store periodic pair indices.
     */
    BDF2TurbulentScheme(const Kratos::Variable<int>& rPeriodicVar)
        : Scheme<TSparseSpace, TDenseSpace>()
        , mrPeriodicIdVar(rPeriodicVar)
    {}

    /// Destructor.
    ~BDF2TurbulentScheme() override
    {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Check input data for errors.
    /**
     * @param rModelPart The fluid's ModelPart
     * @return 0 if no errors were found
     */
    int Check(ModelPart& rModelPart) override
    {
        KRATOS_TRY

        // Base scheme check
        int error_code = BaseType::Check(rModelPart);
        if (error_code != 0) {
            return error_code;
        }

        // Check buffer size: BDF2 reads solution step data at indices 0, 1 and 2.
        KRATOS_ERROR_IF(rModelPart.GetBufferSize() < 3) << "Insufficient buffer size for BDF2, should be at least 3, got " << rModelPart.GetBufferSize() << std::endl;

        return 0;

        KRATOS_CATCH("");
    }

    /// Create the slip-condition rotation tool and run the base initialization.
    void Initialize(ModelPart& rModelPart) override
    {
        // Set up the rotation tool pointer
        const auto& r_proces_info = rModelPart.GetProcessInfo();
        const unsigned int domain_size = r_proces_info[DOMAIN_SIZE];
        // Block size is domain_size + 1 (velocity components plus pressure).
        auto p_aux = Kratos::make_unique<RotationToolType>(domain_size, domain_size + 1, SLIP);
        mpRotationTool.swap(p_aux);

        // Base initialize call
        BaseType::Initialize(rModelPart);
    }

    /// Set the time iteration coefficients
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        // Compute the variable-step BDF2 coefficients for this step.
        this->SetTimeCoefficients(rModelPart.GetProcessInfo());

        // Base function initializes elements and conditions
        BaseType::InitializeSolutionStep(rModelPart,A,Dx,b);

        // Recalculate mesh velocity (to account for variable time step)
        const double tol = 1.0e-12;
        const double Dt = rModelPart.GetProcessInfo()[DELTA_TIME];
        // NOTE(review): this uses GetPreviousSolutionStepInfo(1) while
        // SetTimeCoefficients uses GetPreviousTimeStepInfo(1) for the same
        // quantity — confirm both are intended to return the previous DELTA_TIME.
        const double OldDt = rModelPart.GetProcessInfo().GetPreviousSolutionStepInfo(1)[DELTA_TIME];
        if(std::abs(Dt - OldDt) > tol) {
            const int n_nodes = rModelPart.NumberOfNodes();
            const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];

#pragma omp parallel for
            for(int i_node = 0; i_node < n_nodes; ++i_node) {
                auto it_node = rModelPart.NodesBegin() + i_node;
                auto& rMeshVel = it_node->FastGetSolutionStepValue(MESH_VELOCITY);
                const auto& rDisp0 = it_node->FastGetSolutionStepValue(DISPLACEMENT);
                const auto& rDisp1 = it_node->FastGetSolutionStepValue(DISPLACEMENT,1);
                const auto& rDisp2 = it_node->FastGetSolutionStepValue(DISPLACEMENT,2);
                // Mesh velocity as the BDF2 derivative of the displacement history.
                rMeshVel = BDFcoefs[0] * rDisp0 + BDFcoefs[1] * rDisp1 + BDFcoefs[2] * rDisp2;
            }
        }
    }

    /// Execute the turbulence model (if any) at the start of each non-linear iteration.
    void InitializeNonLinIteration(
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        if (mpTurbulenceModel != 0) mpTurbulenceModel->Execute();

        KRATOS_CATCH("")
    }

    /// Recompute OSS projections (if active) after each non-linear iteration.
    void FinalizeNonLinIteration(
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();

        //if orthogonal subscales are computed
        if (CurrentProcessInfo[OSS_SWITCH] == 1.0) {
            this->LumpedProjection(rModelPart);
            //this->FullProjection(rModelPart);
        }
    }

    /// Start the iteration by providing a first approximation to the solution.
    void Predict(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        const int n_nodes = rModelPart.NumberOfNodes();
        const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];

#pragma omp parallel for
        for(int i_node = 0; i_node < n_nodes; ++i_node) {
            auto it_node = rModelPart.NodesBegin() + i_node;
            auto& rVel0 = it_node->FastGetSolutionStepValue(VELOCITY);
            const auto& rVel1 = it_node->FastGetSolutionStepValue(VELOCITY,1);
            const auto& rVel2 = it_node->FastGetSolutionStepValue(VELOCITY,2);
            auto& rAcceleration = it_node->FastGetSolutionStepValue(ACCELERATION);

            // Predict velocities: linear extrapolation from the two previous steps.
            // The Z component is written unconditionally; presumably VELOCITY is a
            // 3-component array even in 2D — TODO confirm this is intentional.
            if(!it_node->IsFixed(VELOCITY_X))
                rVel0[0] = 2.00 * rVel1[0] - rVel2[0];
            if(!it_node->IsFixed(VELOCITY_Y))
                rVel0[1] = 2.00 * rVel1[1] - rVel2[1];
            if(!it_node->IsFixed(VELOCITY_Z))
                rVel0[2] = 2.00 * rVel1[2] - rVel2[2];

            // Predict acceleration as the BDF2 derivative of the velocity history.
            rAcceleration = BDFcoefs[0] * rVel0 + BDFcoefs[1] * rVel1 + BDFcoefs[2] * rVel2;
        }

        KRATOS_CATCH("")
    }

    /// Store the iteration results as solution step variables and update acceleration after a Newton-Raphson iteration.
    /**
     * @param rModelPart fluid ModelPart
     * @param rDofSet DofSet containing the Newton-Raphson system degrees of freedom.
     * @param A Newton-Raphson system matrix (unused)
     * @param Dx Newton-Raphson iteration solution
     * @param b Newton-Raphson right hand side (unused)
     */
    void Update(
        ModelPart& rModelPart,
        DofsArrayType& rDofSet,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        // Rotate to the local (normal/tangential) frame on slip nodes before
        // applying the update, then rotate back.
        mpRotationTool->RotateVelocities(rModelPart);

        mpDofUpdater->UpdateDofs(rDofSet,Dx);

        mpRotationTool->RecoverVelocities(rModelPart);

        const Vector& BDFCoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS];
        this->UpdateAcceleration(rModelPart,BDFCoefs);

        KRATOS_CATCH("")
    }

    /// Assemble LHS and RHS for an element, adding BDF2 dynamic terms and slip rotation.
    void CalculateSystemContributions(
        Element& rCurrentElement,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY

        LocalSystemMatrixType Mass;
        LocalSystemMatrixType Damp;

        // Get Equation Id
        rCurrentElement.EquationIdVector(rEquationId,rCurrentProcessInfo);

        // Get matrix contributions
        rCurrentElement.CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
        rCurrentElement.CalculateMassMatrix(Mass,rCurrentProcessInfo);
        rCurrentElement.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);

        // Add the dynamic contributions to the local system using BDF2 coefficients
        this->CombineLHSContributions(LHS_Contribution,Mass,Damp,rCurrentProcessInfo);
        this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement,RHS_Contribution,Mass,rCurrentProcessInfo);

        // Apply slip condition
        mpRotationTool->Rotate(LHS_Contribution, RHS_Contribution, rCurrentElement.GetGeometry());
        mpRotationTool->ApplySlipCondition(LHS_Contribution, RHS_Contribution, rCurrentElement.GetGeometry());

        KRATOS_CATCH("")
    }

    /// Assemble the RHS only for an element (same dynamic terms as the full assembly).
    void CalculateRHSContribution(
        Element& rCurrentElement,
        LocalSystemVectorType &RHS_Contribution,
        Element::EquationIdVectorType &rEquationId,
        const ProcessInfo &rCurrentProcessInfo) override
    {
        KRATOS_TRY

        LocalSystemMatrixType Mass;
        LocalSystemMatrixType Damp;

        // Get Equation Id
        rCurrentElement.EquationIdVector(rEquationId,rCurrentProcessInfo);

        // Get matrix contributions
        rCurrentElement.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
        rCurrentElement.CalculateMassMatrix(Mass,rCurrentProcessInfo);
        rCurrentElement.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);

        // Add the dynamic contributions to the local system using BDF2 coefficients
        this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement,RHS_Contribution,Mass,rCurrentProcessInfo);

        // Apply slip condition
        mpRotationTool->Rotate(RHS_Contribution, rCurrentElement.GetGeometry());
        mpRotationTool->ApplySlipCondition(RHS_Contribution, rCurrentElement.GetGeometry());

        KRATOS_CATCH("")
    }

    /// Assemble LHS and RHS for a condition, adding BDF2 dynamic terms and slip rotation.
    void CalculateSystemContributions(
        Condition& rCurrentCondition,
        LocalSystemMatrixType& LHS_Contribution,
        LocalSystemVectorType& RHS_Contribution,
        Element::EquationIdVectorType& rEquationId,
        const ProcessInfo& rCurrentProcessInfo) override
    {
        KRATOS_TRY

        LocalSystemMatrixType Mass;
        LocalSystemMatrixType Damp;

        // Get Equation Id
        rCurrentCondition.EquationIdVector(rEquationId,rCurrentProcessInfo);

        // Get matrix contributions
        rCurrentCondition.CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo);
        rCurrentCondition.CalculateMassMatrix(Mass,rCurrentProcessInfo);
        rCurrentCondition.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);

        // Add the dynamic contributions to the local system using BDF2 coefficients
        this->CombineLHSContributions(LHS_Contribution,Mass,Damp,rCurrentProcessInfo);
        this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition,RHS_Contribution,Mass,rCurrentProcessInfo);

        // Apply slip condition
        mpRotationTool->Rotate(LHS_Contribution, RHS_Contribution, rCurrentCondition.GetGeometry());
        mpRotationTool->ApplySlipCondition(LHS_Contribution, RHS_Contribution, rCurrentCondition.GetGeometry());

        KRATOS_CATCH("")
    }

    /// Assemble the RHS only for a condition (same dynamic terms as the full assembly).
    void CalculateRHSContribution(
        Condition &rCurrentCondition,
        LocalSystemVectorType &RHS_Contribution,
        Element::EquationIdVectorType &rEquationId,
        const ProcessInfo &rCurrentProcessInfo) override
    {
        KRATOS_TRY

        LocalSystemMatrixType Mass;
        LocalSystemMatrixType Damp;

        // Get Equation Id
        rCurrentCondition.EquationIdVector(rEquationId,rCurrentProcessInfo);

        // Get matrix contributions
        rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
        rCurrentCondition.CalculateMassMatrix(Mass,rCurrentProcessInfo);
        rCurrentCondition.CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo);

        // Add the dynamic contributions to the local system using BDF2 coefficients
        this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition,RHS_Contribution,Mass,rCurrentProcessInfo);

        // Apply slip condition
        mpRotationTool->Rotate(RHS_Contribution, rCurrentCondition.GetGeometry());
        mpRotationTool->ApplySlipCondition(RHS_Contribution, rCurrentCondition.GetGeometry());

        KRATOS_CATCH("")
    }

    /// Free memory allocated by this object.
    void Clear() override
    {
        this->mpDofUpdater->Clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "BDF2TurbulentScheme";
        return buffer.str();
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {}

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /// Calculate the coefficients for time iteration.
    /**
     * Variable-step BDF2: for Rho = OldDt/Dt the coefficients reduce to
     * (3/2Dt, -4/2Dt, 1/2Dt) when the time step is constant.
     * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
     */
    void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo)
    {
        KRATOS_TRY;

        //calculate the BDF coefficients
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)

        KRATOS_CATCH("");
    }

    /// Update Dof values after a Newton-Raphson iteration.
    /**
     * NOTE(review): Update() above uses mpDofUpdater instead of this method;
     * this appears to be kept as a customization point for derived classes —
     * confirm before removing.
     * @param rDofSet Container for the Degrees of freedom in the system
     * @param Dx Solution of the linear system
     */
    virtual void UpdateDofs(
        DofsArrayType& rDofSet,
        TSystemVectorType& Dx)
    {
        KRATOS_TRY

        const int n_dof = rDofSet.size();

#pragma omp parallel for
        for (int i_dof = 0; i_dof < n_dof; ++i_dof) {
            auto it_dof = rDofSet.begin() + i_dof;
            if (it_dof->IsFree()) {
                it_dof->GetSolutionStepValue() += TSparseSpace::GetValue(Dx, it_dof->EquationId());
            }
        }

        KRATOS_CATCH("")
    }

    /// Update Dof values after a Newton-Raphson iteration
    /**
     * Recomputes the nodal ACCELERATION as the BDF2 derivative of VELOCITY.
     * @param rModelPart fluid ModelPart
     * @param rBDFcoefs Time stepping coefficients for this iteration.
     */
    void UpdateAcceleration(
        ModelPart& rModelPart,
        const Vector& rBDFcoefs)
    {
        KRATOS_TRY

        const double Coef0 = rBDFcoefs[0];
        const double Coef1 = rBDFcoefs[1];
        const double Coef2 = rBDFcoefs[2];

        const int n_nodes = rModelPart.NumberOfNodes();

#pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            auto it_node = rModelPart.NodesBegin() + i_node;
            const auto& rVel0 = it_node->FastGetSolutionStepValue(VELOCITY);
            const auto& rVel1 = it_node->FastGetSolutionStepValue(VELOCITY,1);
            const auto& rVel2 = it_node->FastGetSolutionStepValue(VELOCITY,2);
            auto& rAcceleration = it_node->FastGetSolutionStepValue(ACCELERATION);

            rAcceleration = Coef0 * rVel0 + Coef1 * rVel1 + Coef2 * rVel2;
        }

        KRATOS_CATCH("")
    }

    /// Combine static, dynamic (mass) and velocity (damping) terms into the local LHS.
    void CombineLHSContributions(
        LocalSystemMatrixType& rLHS,
        LocalSystemMatrixType& rMass,
        LocalSystemMatrixType& rDamp,
        const ProcessInfo& rCurrentProcessInfo)
    {
        const double Coef0 = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS)[0];
        if (rMass.size1() != 0) noalias(rLHS) += Coef0 * rMass;
        if (rDamp.size1() != 0) noalias(rLHS) += rDamp;
    }

    /// Subtract M * (BDF2 derivative of the velocity history) from the local RHS.
    // TObject is Element or Condition; both provide GetFirstDerivativesVector.
    template<class TObject>
    void AddDynamicRHSContribution(
        TObject& rObject,
        LocalSystemVectorType& rRHS,
        LocalSystemMatrixType& rMass,
        const ProcessInfo& rCurrentProcessInfo)
    {
        if (rMass.size1() != 0) {
            const Vector& rCoefs = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS);
            const auto& r_const_obj_ref = rObject;
            LocalSystemVectorType Acc;
            r_const_obj_ref.GetFirstDerivativesVector(Acc);
            Acc *= rCoefs[0];

            for(unsigned int n = 1; n < 3; ++n) {
                LocalSystemVectorType rVel;
                r_const_obj_ref.GetFirstDerivativesVector(rVel,n);
                noalias(Acc) += rCoefs[n] * rVel;
            }

            noalias(rRHS) -= prod(rMass,Acc);
        }
    }

    /// Solve the OSS projection system with the consistent mass matrix via
    /// Jacobi-like iterations (Ml dx = b - Mc x). Currently unused; see
    /// FinalizeNonLinIteration, which calls LumpedProjection instead.
    void FullProjection(ModelPart& rModelPart)
    {
        const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();

        // Initialize containers
        const int n_nodes = rModelPart.NumberOfNodes();
        const int n_elems = rModelPart.NumberOfElements();
        const array_1d<double,3> zero_vect = ZeroVector(3);
#pragma omp parallel for firstprivate(zero_vect)
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            auto ind = rModelPart.NodesBegin() + i_node;
            noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = zero_vect; // "x"
            ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; // "x"
            ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // "Ml"
        }

        // Newton-Raphson parameters
        const double RelTol = 1e-4 * rModelPart.NumberOfNodes();
        const double AbsTol = 1e-6 * rModelPart.NumberOfNodes();
        const unsigned int MaxIter = 100;

        // iteration variables
        unsigned int iter = 0;
        array_1d<double,3> dMomProj = zero_vect;
        double dMassProj = 0.0;

        double RelMomErr = 1000.0 * RelTol;
        double RelMassErr = 1000.0 * RelTol;
        double AbsMomErr = 1000.0 * AbsTol;
        double AbsMassErr = 1000.0 * AbsTol;

        while( ( (AbsMomErr > AbsTol && RelMomErr > RelTol) || (AbsMassErr > AbsTol && RelMassErr > RelTol) ) && iter < MaxIter) {
            // Reinitialize RHS
#pragma omp parallel for firstprivate(zero_vect)
            for (int i_node = 0; i_node < n_nodes; ++i_node) {
                auto ind = rModelPart.NodesBegin() + i_node;
                noalias(ind->GetValue(ADVPROJ)) = zero_vect; // "b"
                ind->GetValue(DIVPROJ) = 0.0; // "b"
                ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // Reset because Calculate will overwrite it
            }

            // Reinitialize errors
            RelMomErr = 0.0;
            RelMassErr = 0.0;
            AbsMomErr = 0.0;
            AbsMassErr = 0.0;

            // Compute new values
            array_1d<double, 3 > output;
#pragma omp parallel for private(output)
            for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
                auto it_elem = rModelPart.ElementsBegin() + i_elem;
                it_elem->Calculate(SUBSCALE_VELOCITY, output, rCurrentProcessInfo);
            }

            rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
            rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
            rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);

            // Update iteration variables
#pragma omp parallel for
            for (int i_node = 0; i_node < n_nodes; ++i_node) {
                auto ind = rModelPart.NodesBegin() + i_node;
                const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); // Ml dx = b - Mc x
                dMomProj = ind->GetValue(ADVPROJ) / Area;
                dMassProj = ind->GetValue(DIVPROJ) / Area;

                RelMomErr += sqrt( dMomProj[0]*dMomProj[0] + dMomProj[1]*dMomProj[1] + dMomProj[2]*dMomProj[2]);
                RelMassErr += fabs(dMassProj);

                auto& rMomRHS = ind->FastGetSolutionStepValue(ADVPROJ);
                double& rMassRHS = ind->FastGetSolutionStepValue(DIVPROJ);
                rMomRHS += dMomProj;
                rMassRHS += dMassProj;

                AbsMomErr += sqrt( rMomRHS[0]*rMomRHS[0] + rMomRHS[1]*rMomRHS[1] + rMomRHS[2]*rMomRHS[2]);
                AbsMassErr += fabs(rMassRHS);
            }

            if(AbsMomErr > 1e-10)
                RelMomErr /= AbsMomErr;
            else // If residual is close to zero, force absolute convergence to avoid division by zero errors
                RelMomErr = 1000.0;

            if(AbsMassErr > 1e-10)
                RelMassErr /= AbsMassErr;
            else
                RelMassErr = 1000.0;

            iter++;
        }

        KRATOS_INFO("BDF2TurbulentScheme") << "Performed OSS Projection in " << iter << " iterations" << std::endl;
    }

    /// Compute the OSS projections using a lumped mass matrix (single pass).
    void LumpedProjection(ModelPart& rModelPart)
    {
        const int n_nodes = rModelPart.NumberOfNodes();
        const int n_elems = rModelPart.NumberOfElements();

        const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo();

        const array_1d<double,3> zero_vect = ZeroVector(3);
#pragma omp parallel for firstprivate(zero_vect)
        for (int i_node = 0; i_node < n_nodes; ++i_node) {
            auto itNode = rModelPart.NodesBegin() + i_node;
            noalias(itNode->FastGetSolutionStepValue(ADVPROJ)) = zero_vect;
            itNode->FastGetSolutionStepValue(DIVPROJ) = 0.0;
            itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
        }

        array_1d<double, 3 > Out;
#pragma omp parallel for private(Out)
        for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
            auto itElem = rModelPart.ElementsBegin() + i_elem;
            itElem->Calculate(ADVPROJ, Out, rCurrentProcessInfo);
        }

        rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
        rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);

        // Correction for periodic conditions
        // (Key() == 0 means no periodic variable was supplied at construction.)
        if (mrPeriodicIdVar.Key() != 0) {
            this->PeriodicConditionProjectionCorrection(rModelPart);
        }

        const double zero_tol = 1.0e-12;
#pragma omp parallel for firstprivate(zero_tol)
        for (int i_node = 0; i_node < n_nodes; ++i_node){
            auto iNode = rModelPart.NodesBegin() + i_node;
            // Avoid division by zero on nodes that received no area contribution.
            if (iNode->FastGetSolutionStepValue(NODAL_AREA) < zero_tol) {
                iNode->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
            }
            const double Area = iNode->FastGetSolutionStepValue(NODAL_AREA);
            iNode->FastGetSolutionStepValue(ADVPROJ) /= Area;
            iNode->FastGetSolutionStepValue(DIVPROJ) /= Area;
        }

        KRATOS_INFO("BDF2TurbulentScheme") << "Computing OSS projections" << std::endl;
    }

    /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
     * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
     * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
     * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n
     * 3- The value on all periodic nodes is replaced by the one received in step 2.
     */
    void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
    {
        const int num_nodes = rModelPart.NumberOfNodes();
        const int num_conditions = rModelPart.NumberOfConditions();

#pragma omp parallel for
        for (int i = 0; i < num_nodes; i++) {
            auto it_node = rModelPart.NodesBegin() + i;

            it_node->SetValue(NODAL_AREA,0.0);
            it_node->SetValue(ADVPROJ,ZeroVector(3));
            it_node->SetValue(DIVPROJ,0.0);
        }

#pragma omp parallel for
        for (int i = 0; i < num_conditions; i++) {
            auto it_cond = rModelPart.ConditionsBegin() + i;

            if(it_cond->Is(PERIODIC)) {
                this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry());
            }
        }

        rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ);
        rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);

#pragma omp parallel for
        for (int i = 0; i < num_nodes; i++) {
            auto it_node = rModelPart.NodesBegin() + i;
            this->CorrectContributionsOnPeriodicNode(*it_node);
        }
    }

    /// Sum the projection data over all nodes of a periodic condition and
    /// store the total on each node's non-historical container.
    void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry)
    {
        unsigned int nodes_in_cond = rGeometry.PointsNumber();

        double nodal_area = 0.0;
        array_1d<double,3> momentum_projection = ZeroVector(3);
        double mass_projection = 0.0;
        for ( unsigned int i = 0; i < nodes_in_cond; i++ )
        {
            auto& r_node = rGeometry[i];
            nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA);
            noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ);
            mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ);
        }

        for ( unsigned int i = 0; i < nodes_in_cond; i++ )
        {
            auto& r_node = rGeometry[i];
            /* Note that this loop is expected to be threadsafe in normal conditions,
             * since each node should belong to a single periodic link. However, I am
             * setting the locks for openmp in case that we try more complicated things
             * in the future (like having different periodic conditions for different
             * coordinate directions).
             */
            r_node.SetLock();
            r_node.GetValue(NODAL_AREA) = nodal_area;
            noalias(r_node.GetValue(ADVPROJ)) = momentum_projection;
            r_node.GetValue(DIVPROJ) = mass_projection;
            r_node.UnSetLock();
        }
    }

    /// Copy the assembled periodic totals from the non-historical container
    /// back into the historical (solution step) variables.
    void CorrectContributionsOnPeriodicNode(Node<3>& rNode)
    {
        //TODO: This needs to be done in another manner as soon as we start using non-historical NODAL_AREA
        if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set.
        {
            rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA);
            noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ);
            rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ);
        }
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}

private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    /// Pointer to a turbulence model
    Process::Pointer mpTurbulenceModel = nullptr;

    // Rotation tool for slip conditions; created in Initialize().
    RotationToolPointerType mpRotationTool = nullptr;

    // Helper that applies the linear-system update to the Dof values.
    typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();

    // Variable holding periodic pair indices; bound to the static dummy
    // variable (Key() == 0) when periodic conditions are not used.
    const Kratos::Variable<int>& mrPeriodicIdVar;

    //    ///@}
    //    ///@name Serialization
    //    ///@{
    //
    //    friend class Serializer;
    //
    //    virtual void save(Serializer& rSerializer) const
    //    {
    //        KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, BaseType );
    //        rSerializer.save("mpTurbulenceModel",mpTurbulenceModel);
    //    }
    //
    //    virtual void load(Serializer& rSerializer)
    //    {
    //        KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, BaseType );
    //        rSerializer.load("mpTurbulenceModel",mpTurbulenceModel);
    //    }

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    // NOTE(review): declared private to forbid copying, but if it were ever
    // instantiated it would be ill-formed (no return statement, and the
    // reference member mrPeriodicIdVar cannot be reseated) — `= delete`
    // would express the intent safely. Confirm before changing.
    BDF2TurbulentScheme & operator=(BDF2TurbulentScheme const& rOther) {}

    /// Copy constructor.
    // NOTE(review): same caveat as the assignment operator — instantiation
    // would fail because mrPeriodicIdVar is not initialized here.
    BDF2TurbulentScheme(BDF2TurbulentScheme const& rOther) {}

    ///@}

}; // Class BDF2TurbulentScheme

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
template<class TSparseSpace,class TDenseSpace>
inline std::istream& operator >>(std::istream& rIStream,BDF2TurbulentScheme<TSparseSpace,TDenseSpace>& rThis)
{
    return rIStream;
}

/// output stream function
template<class TSparseSpace,class TDenseSpace>
inline std::ostream& operator <<(std::ostream& rOStream,const BDF2TurbulentScheme<TSparseSpace,TDenseSpace>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}

///@}

///@} addtogroup block

} // namespace Kratos.

#endif // KRATOS_BDF2_TURBULENT_SCHEME_H_INCLUDED  defined
Questao03.c
// Program that lets the user choose the precision of PI (parallel version).
// Created by Gustavo Lopes Rodrigues.
// Updated version: the program now also prints the elapsed wall-clock time.
//
// Fixes over the previous revision:
//  - the shared variable `x` was incremented inside the parallel loop, so the
//    term computed by each iteration depended on thread scheduling (data race);
//  - `operacao` was declared private but never initialized per thread, so each
//    thread accumulated onto garbage;
//  - even in the serial reading, the running partial sum `operacao` was
//    re-added to `resultado` on every iteration, which is not the Leibniz
//    series. Each term is now computed independently from the loop index and
//    combined with an OpenMP reduction.
#include <stdio.h>
#include <omp.h>

// Computes PI via the Leibniz series using `precisao` terms.
double calcularPi(int precisao);

// Entry point: reads the precision from stdin, prints PI and the elapsed time.
int main(void){
    double start = omp_get_wtime();
    int precisao;
    if (scanf("%d", &precisao) != 1) {
        // No usable input: nothing sensible to compute.
        return 1;
    }
    double pi = calcularPi(precisao);
    printf("O valor de PI é: %f\n", pi);
    double finish = omp_get_wtime() - start;
    printf("tempo de execução = %f\n", finish);
}

// Leibniz series: PI = 4 - 4/3 + 4/5 - 4/7 + ...
// Iteration i contributes -4/(2i+3) when i is even and +4/(2i+3) when i is
// odd (the leading "4" is the series' first term). Because every term now
// depends only on i, iterations are independent and a plain reduction on
// `resultado` is correct.
double calcularPi(int precisao)
{
    double resultado = 4.0;

    #pragma omp parallel for reduction(+:resultado)
    for (int i = 0; i < precisao; i++) {
        const double x = 2.0 * i + 3.0;  // denominator of the (i+1)-th term
        if (i % 2 == 0) {
            resultado -= 4.0 / x;
        } else {
            resultado += 4.0 / x;
        }
    }

    return resultado;
}
FourierTransform.h
#pragma once #include <omp.h> #include "ScalarField.h" #include "Grid.h" #include "Enums.h" #ifdef __USE_FFT__ #include "fftw3.h" #endif namespace pfc { namespace fourier_transform { inline Int3 getSizeOfComplexArray(Int3 sizeOfFP) { return Int3(sizeOfFP.x, sizeOfFP.y, sizeOfFP.z / 2 + 1); } enum Direction { RtoC, CtoR }; } class FourierTransformField { #ifdef __USE_FFT__ Int3 size; fftw_plan plans[2]; // RtoC/CtoR ScalarField<FP>* realField; ScalarField<complexFP>* complexField; #endif public: #ifdef __USE_FFT__ FourierTransformField() { plans[fourier_transform::Direction::RtoC] = 0; plans[fourier_transform::Direction::CtoR] = 0; } void initialize(ScalarField<FP>* _realField, ScalarField<complexFP>* _complexField, Int3 _size) { size = _size; realField = _realField; complexField = _complexField; createPlans(); } ~FourierTransformField() { destroyPlans(); } void doDirectFourierTransform() { fftw_execute(plans[fourier_transform::Direction::RtoC]); } void doInverseFourierTransform() { fftw_execute(plans[fourier_transform::Direction::CtoR]); ScalarField<FP>& res = *realField; #pragma omp parallel for for (int i = 0; i < size.x; i++) for (int j = 0; j < size.y; j++) //#pragma omp simd for (int k = 0; k < size.z; k++) res(i, j, k) /= (FP)size.x*size.y*size.z; } #else FourierTransformField() {} void initialize(ScalarField<FP>* _realField, ScalarField<complexFP>* _complexField, Int3 _size) {} void doDirectFourierTransform() {} void doInverseFourierTransform() {} ~FourierTransformField() {} #endif FourierTransformField(ScalarField<FP>* _realField, ScalarField<complexFP>* _complexField, Int3 _size) { initialize(_realField, _complexField, _size); } void doFourierTransform(fourier_transform::Direction direction) { switch (direction) { case fourier_transform::Direction::RtoC: doDirectFourierTransform(); break; case fourier_transform::Direction::CtoR: doInverseFourierTransform(); break; default: break; } } private: #ifdef __USE_FFT__ void createPlans() { int Nx = size.x, Ny = 
size.y, Nz = size.z; ScalarField<FP>& arrD = *(realField); ScalarField<complexFP>& arrC = *(complexField); #ifdef __USE_OMP__ fftw_plan_with_nthreads(omp_get_max_threads()); #endif plans[fourier_transform::Direction::RtoC] = fftw_plan_dft_r2c_3d(Nx, Ny, Nz, &(arrD(0, 0, 0)), (fftw_complex*)&(arrC(0, 0, 0)), FFTW_ESTIMATE); #ifdef __USE_OMP__ fftw_plan_with_nthreads(omp_get_max_threads()); #endif plans[fourier_transform::Direction::CtoR] = fftw_plan_dft_c2r_3d(Nx, Ny, Nz, (fftw_complex*)&(arrC(0, 0, 0)), &(arrD(0, 0, 0)), FFTW_ESTIMATE); } void destroyPlans() { if (plans[fourier_transform::Direction::RtoC] != 0) fftw_destroy_plan(plans[fourier_transform::Direction::RtoC]); if (plans[fourier_transform::Direction::CtoR] != 0) fftw_destroy_plan(plans[fourier_transform::Direction::CtoR]); } #endif }; class FourierTransformGrid { FourierTransformField transform[3][3]; // field, coordinate public: FourierTransformGrid() {} template<GridTypes gridType> void initialize(Grid<FP, gridType>* gridFP, Grid<complexFP, gridType>* gridCFP) { transform[Field::E][Coordinate::x].initialize(&gridFP->Ex, &gridCFP->Ex, gridFP->numCells); transform[Field::E][Coordinate::y].initialize(&gridFP->Ey, &gridCFP->Ey, gridFP->numCells); transform[Field::E][Coordinate::z].initialize(&gridFP->Ez, &gridCFP->Ez, gridFP->numCells); transform[Field::B][Coordinate::x].initialize(&gridFP->Bx, &gridCFP->Bx, gridFP->numCells); transform[Field::B][Coordinate::y].initialize(&gridFP->By, &gridCFP->By, gridFP->numCells); transform[Field::B][Coordinate::z].initialize(&gridFP->Bz, &gridCFP->Bz, gridFP->numCells); transform[Field::J][Coordinate::x].initialize(&gridFP->Jx, &gridCFP->Jx, gridFP->numCells); transform[Field::J][Coordinate::y].initialize(&gridFP->Jy, &gridCFP->Jy, gridFP->numCells); transform[Field::J][Coordinate::z].initialize(&gridFP->Jz, &gridCFP->Jz, gridFP->numCells); } void doDirectFourierTransform(Field field, Coordinate coord) { transform[field][coord].doDirectFourierTransform(); } void 
doInverseFourierTransform(Field field, Coordinate coord) { transform[field][coord].doInverseFourierTransform(); } void doFourierTransform(Field field, Coordinate coord, fourier_transform::Direction direction) { transform[field][coord].doFourierTransform(direction); } }; }
integral.c
/**
 * integral.c
 *
 * Counting integral with MPI or OpenMP
 *
 * @author pikryukov
 * @version 1.0
 *
 * e-mail: kryukov@frtk.ru
 *
 * Copyright (C) Kryukov Pavel 2012
 * for MIPT MPI course.
 */

/* C generic */
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* exit */
#include <math.h>   /* sin */
#include <assert.h>

#ifdef USE_MPI
/* If USE_MPI is defined, we use MPI, otherwise - OpenMP */
#include <mpi.h>
/* Only rank 0 prints; every rank must reach MPI_Finalize before exit. */
#define ERRORPRINT(...) \
    {if (!rank) fprintf(stderr, __VA_ARGS__); MPI_Finalize(); exit(1);}
#define PRINT(...) {if (!rank) printf(__VA_ARGS__);}
#else
#define ERRORPRINT(...) {fprintf(stderr, __VA_ARGS__); exit(1);}
#define PRINT(...) printf(__VA_ARGS__);
#endif

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Integrand, and its k-th zero: sin(1/x) = 0 at x = 1/(k*pi). */
#define FUNC(x) (sin(1 / (x)))
#define ZERO(x) (1 / (M_PI * (x))) /* zero of FUNC. #1 is the greatest. */

#define N 10000 /* Number of zeroes to count integral on */
#define M 10000 /* Number of nodes between zeroes */

/**
 * Counts integral of FUNC from a to b usin
 * trapezoid method with M nodes
 * @param a left edge
 * @param b right edge
 * @return integral
 */
double monointegral(double a, double b)
{
    int i;
    double tau = (b - a) / M;  /* trapezoid width */
    double start = a;
    double sum = 0.;
    for (i = 0; i < M; ++i)
    {
        /* Counting with trapezoids rule */
        sum += tau * (FUNC(start + tau) + FUNC(start)) * 0.5;

        /* Go to next node */
        start += tau;
    }
    return sum;
}

#ifdef USE_MPI
/**
 * Splits N jobs to 'rank' thread
 * @param rank rank of current thread
 * @param size size of pool
 * @param start pointer to start number of job
 * @param finish pointer to finish number of job
 */
void splitfine(int rank, int size, int* start, int* finish)
{
    /* We try to split numbers like this: */
    /*
    0  1  2  3  4  (5 == amount)
    5  6  7  8  9
    10 11 12 13 14
    15 16 17 18 19
    20            <- resRank
    21 22 23 24 25 26
    */
    /* Ranks below resRank get 'amount' jobs; ranks from resRank up get one
     * extra job, so exactly N % size extras are distributed. */
    const unsigned resRank = size - N % size;
    const unsigned amount = N / size;

    *start = rank * amount;
    if (rank > resRank) *start += rank - resRank;  /* extras handed out before this rank */

    *finish = *start + amount;
    if (rank >= resRank) ++(*finish);  /* this rank takes one extra job */
}
#endif

/**
 * Counts integral of FUNC from NODE(N + 1) to NODE(1)
 * in parallel threads
 * @param rank thread rank (if MPI)
 * @param size pool size (if MPI)
 */
void multiintegral(int rank, int size)
{
    /* NOTE: with MPI, 'res' is defined only on rank 0 after MPI_Reduce;
     * PRINT only outputs on rank 0, so this is consistent. */
    double res, sum = 0.;
    int start, finish, i;
#ifdef USE_MPI
    splitfine(rank, size, &start, &finish);
#else
    start = 0;
    finish = N;
    #pragma omp parallel for reduction (+: sum) private(i)
#endif /* USE_MPI */
    /* Sub-interval i runs between consecutive zeroes of the integrand, from
     * ZERO(i + 2) (smaller x) up to ZERO(i + 1). */
    for (i = start; i < finish; ++i)
    {
        sum += monointegral(ZERO(i + 2), ZERO(i + 1));
    }
#ifdef USE_MPI
    MPI_Reduce(&sum, &res, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
#else
    res = sum;
#endif /* USE_MPI */
    PRINT("Integral of sin 1/x from %e to %e is %e\n", ZERO(N + 1), ZERO(1), res);
}

/**
 * Entry point of program
 * @param argc should be 1
 * @param argv no arguments needed
 * @return 0 on success, 1 on error
 */
int main(int argc, char** argv)
{
    int rank = 0, size = 0;
#ifdef USE_MPI
    MPI_Init(&argc, &argv);
    double t = -MPI_Wtime();  /* negated now; adding MPI_Wtime() later yields elapsed time */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
#endif /* USE_MPI */

    if (argc != 1) ERRORPRINT("Syntax error.\n No arguments at all\n");

    multiintegral(rank, size);

#ifdef USE_MPI
    PRINT("Time is %f s\n", t += MPI_Wtime());
    MPI_Finalize();
#endif /* USE_MPI */
    return 0;
}
HALC.h
/*
 * SequencingProject.h
 *
 *  Created on: Dec 2, 2015
 *      Author: llx
 */
#ifndef SEQUENCINGPROJECT_H_
#define SEQUENCINGPROJECT_H_
#include<string.h>
#include<map>
#include<fstream>
#include<list>
#include<iostream>
#include<utility>
#include<vector>
#include<unordered_map>
#include<sstream>
#include<omp.h>

// Global lock guarding Cfilebuffer::filebuffers across OpenMP threads.
static omp_lock_t mylock;

// A cut point on a contig where a long read alignment begins or ends.
class Ccutpoint
{
public:
    std::string contigname;
    int index; //start from 0
    int longreadindex;
    bool ishead;                       // true if this is the head cut of the alignment
    bool changed = false;
    std::fstream::pos_type fileindex;  // offset of the contig in its FASTA file
    //char strand;
};

// Coordinates of an N-gap region mapped between a long read and a contig.
struct Nspasenode
{
    int longreadheadindex;
    int longreadtailindex;
    int contigheadindex;
    int contigtailindex;
    int rawlongreadheadindex;
    int rawlongreadtailindex;
    std::string longreadname;
    Nspasenode() :
            longreadheadindex(0), longreadtailindex(0), contigheadindex(0), contigtailindex(0)
    {
    }
};

// A piece of a contig aligned to a stretch of a long read.
class CSubcontig
{
public:
    int headindex; //start from 0
    int tailindex;
    char strand;
    int longreadheadindex;
    int longreadtailindex;
    unsigned int similarity;
    std::string contigname;
    int headoffset;
    int tailoffset;
    unsigned long indexofsubcontigs;
    std::vector<Nspasenode>* Nspace;   // non-owning; may be NULL
    bool operator==(const CSubcontig& obj) const;
    CSubcontig() :
            headoffset(0), tailoffset(0), Nspace(NULL)
    {
    }
};

// Wrapper used when sorting N-gap nodes; points at an existing CSubcontig.
struct Nnodeforsort
{
    CSubcontig* me;
};

// Location and length of a contig inside the contig FASTA file.
class Ccontig
{
public:
    std::fstream::pos_type index; //start from 0
    int length; //number of bases
};

// Location, length and correction state of a long read.
class Clongread
{
public:
    std::fstream::pos_type index; //start from 0
    int length;
    bool corrected;
    // NOTE(review): initializer list now follows declaration order
    // (index, length, corrected) to match actual initialization order.
    Clongread() :
            index(0), length(0), corrected(false)
    {
    }
};

// Orderings used when sorting cut points.
inline bool ComIndex(Ccutpoint first, Ccutpoint second)
{
    return first.index < second.index;
}
inline bool ComName(Ccutpoint first, Ccutpoint second)
{
    return first.contigname < second.contigname;
}

// Hash/equality functors for unordered containers keyed by a pair of longs.
namespace __gnu_cxx
{
struct map_hash
{
    size_t operator()(const std::pair<long, long>& m) const;
};
struct map_equal
{
    bool operator()(const std::pair<long, long>& m1, const std::pair<long, long>& m2) const;
};
}

// In-memory cache of whole files, shared between OpenMP threads.
class Cfilebuffer
{
private:
    std::unordered_map<std::string, std::string> filebuffers;
public:
    Cfilebuffer()
    {
    }
    ;
    // Eagerly loads 'filename' into the cache ('size' is unused, kept for
    // interface compatibility).
    Cfilebuffer(std::string filename, long size = 0)
    {
        std::ifstream fin(filename.c_str());
        std::string fileContent;
        std::stringstream ss;
#pragma omp critical
        {
            if (fin.is_open())
            {
                ss << fin.rdbuf();
                filebuffers[filename] = std::move(ss.str());
                fin.close();
            }
        }
    }
    // Loads 'filename' into the cache if not present, using double-checked
    // locking on the global 'mylock'.
    void refreshbuffer(std::string filename, long size = 0)
    {
        if (filebuffers.find(filename) == filebuffers.end())
        {
            omp_set_lock(&mylock);
            // BUG FIX: the second check used to 'return' while still holding
            // mylock, deadlocking every subsequent caller. Release the lock
            // before returning when another thread already loaded the file.
            if (filebuffers.find(filename) != filebuffers.end())
            {
                omp_unset_lock(&mylock);
                return;
            }
            std::ifstream infile(filename.c_str());
            std::string fileContent;
            std::stringstream ss;
            if (infile.is_open())
            {
                ss << infile.rdbuf();
                filebuffers[filename] = std::move(ss.str());
                infile.close();
            }
            omp_unset_lock(&mylock);
        }
    }
    // Returns the [begin, end) substring of the cached file content.
    std::string Getstring(char *argv, std::fstream::pos_type begin, std::fstream::pos_type end);
};

// Per-long-read subgraph built from the contig alignments of that read.
class CSubUndigraph
{
private:
    struct CSubcontigEx;
    static bool ComHeadIndex(CSubcontigEx first, CSubcontigEx second);
public:
    static std::string lralignedseq;
    static std::string matchpattern;
    static std::string ctalignedseq;
    static std::vector<std::vector<CSubcontig> > contiglist; //contigs of a longread
    std::string longreadname;
    std::vector<CSubcontigEx> Subconitglist; //all subcontigs of a longread
    friend class CUndigraph;
    std::unordered_map<std::pair<unsigned long, unsigned long>, bool, __gnu_cxx ::map_hash, __gnu_cxx ::map_equal> edges[3];
private:
    int ctoffset;
    int lroffset;
    // int matchlength;
    int matchoffset;
    int numoflines(std::string& s, int begin, int end);
    int cutlongread(int length, CSubcontig& subcontig);
    int cutlongreadreverse(int length, CSubcontig& subcontig);
    //vector<CSubcontig> badcontigs;
public:
    CSubUndigraph();
    bool addToContigList(unsigned long head, unsigned long tail, char strand, int lrheadindex, int lrtailindex, int headoffset, int tailoffset, int ctheadindex, int cttailindex);
    static bool clearcontiglist();
    int getlrlength(int length, unsigned int& similarity);
    int getlrlengthreverse(int length, unsigned int& similarity);
    bool getAlignInf(int lrheadindex, int lrtailindex, int headoffset, int tailoffset, char strand);
    bool getSubcontiglist();
    bool drawLine(CSubcontigEx contig1, CSubcontigEx contig2);
    bool getEdges();
};

// Whole-dataset undirected graph aggregated over all subgraphs.
class CUndigraph
{
public:
    static std::unordered_map<std::pair<unsigned long, unsigned long>, unsigned short, __gnu_cxx ::map_hash, __gnu_cxx ::map_equal> graph[3];
    static void MakeUndigraph(std::ifstream& alignfile);
    static std::vector<CSubUndigraph> subundigraphs;
    static std::vector<Nnodeforsort> Nnodes;
    static double anveragesupport;
    static void replaceN();
    static void findanveragesupport();
    static inline bool comContigName(Nnodeforsort first, Nnodeforsort second);
};

// A vector<int> with a cursor for sequential consumption.
class CMyVectorInt: public std::vector<int>
{
private:
    unsigned int biggestindex = 0;  // cursor into the vector
public:
    int getnext();
    bool hasnext();
};

// Drives the long-read correction using the alignment graph.
class Ccorrector
{
private:
    CUndigraph undigraph;
    // int* dist;
    // int* path;
    // std::vector<CMyVectorInt> pdist;
    // std::vector<CMyVectorInt> ppath;
    char* lrfilename;
    char* ctfilename;
    bool fpathbysupport(int index, int *&dist);
    bool fpathbysimilarity(int index, int *&dist);
    bool nfpathbysimilarity(int index, int n, std::vector<CMyVectorInt> &pdist);
    int froutebysupport(int index, int *&dist, int *&path);
    int froutebysimilarity(int index, int *&dist, int *&path);
    void nfroutebysimilarity(int index, int* counter, int j, int pathposition, int jposition, int *&path, std::vector<CMyVectorInt> &pdist, std::vector<CMyVectorInt> &ppath);
    void bestnrouteofsimilarity(int index, int n, int *path, std::vector<CMyVectorInt> &pdist, std::vector<CMyVectorInt> &ppath);
    int leastcostofn(int index, std::vector<CMyVectorInt> &path, bool &hasrepeat);
    std::string& changetoreverse(std::string& s);
public:
    Ccorrector(char* lrfile, char* ctfile);
    bool findBestRouteBySimilarity();
    bool findBestRouteBySupport();
    bool findBestNRoute(int n);
    void docorrect(int subundigraphindex, int ppathindex, std::ofstream &correctedfile, std::ofstream &repeatfile, std::vector<CMyVectorInt> &ppath, Cfilebuffer &longreadbuffer, Cfilebuffer &contigbuffer, bool & hasrepeat);
};

#endif /* SEQUENCINGPROJECT_H_ */
GB_unaryop__lnot_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_fp32
// op(A') function:  GB_tran__lnot_uint64_fp32

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT (result is 1 when aij == 0, else 0)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting: float aij -> uint64_t, with saturation per GB_CAST_UNSIGNED
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint64_fp32
(
    uint64_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // elementwise, embarrassingly parallel over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint64_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose loop lives in the shared template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SpatialConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c"
#else

/* Scatter-adds the unfolded (im2col) buffer 'finput' back into 'input'
   (col2im). Used by the gradInput pass.
   note: due to write issues, this one cannot be parallelized as well as
   unfolded_copy -- overlapping patches write to the same input cells, so
   parallelism is only over input planes. */
static void nn_(unfolded_acc)(THTensor *finput, THTensor *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
  int nip;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(nip)
  for(nip = 0; nip < nInputPlane; nip++)
  {
    int kw, kh, y, x, ix, iy;
    for(kh = 0; kh < kH; kh++)
    {
      for(kw = 0; kw < kW; kw++)
      {
        /* row of the unfolded buffer for this (plane, kh, kw) tap */
        real *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
        real *dst = input_data + nip*(inputHeight*inputWidth);
        if (padW > 0 || padH > 0)
        {
          int lpad,rpad;
          for(y = 0; y < outputHeight; y++)
          {
            iy = y*dH - padH + kh;
            if (iy < 0 || iy >= inputHeight)
            {
              /* entire row falls in the padding -- nothing to accumulate */
            }
            else
            {
              if (dW==1){
                ix = 0 - padW + kw;
                /* clip the left/right columns that land in the padding */
                lpad = fmaxf(0,padW-kw);
                rpad = fmaxf(0,padW-(kW-kw-1));
                THVector_(add)(dst+iy*inputWidth+ix+lpad, src+y*outputWidth+lpad, 1, outputWidth - lpad - rpad); /* note: THVector_add could handle 1 value better */
              }
              else{
                for (x=0; x<outputWidth; x++){
                  ix = x*dW - padW + kw;
                  if (ix < 0 || ix >= inputWidth){
                  }else
                    THVector_(add)(dst+iy*inputWidth+ix, src+y*outputWidth+x, 1, 1);
                }
              }
            }
          }
        }
        else
        {
          /* no padding: every tap maps inside the input */
          for(y = 0; y < outputHeight; y++)
          {
            iy = y*dH + kh;
            ix = 0 + kw;
            if (dW == 1 )
            {
              THVector_(add)(dst+iy*inputWidth+ix, src+y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */
            }
            else{
              for(x = 0; x < outputWidth; x++)
                THVector_(add)(dst+iy*inputWidth+ix+x*dW, src+y*outputWidth+x, 1, 1);
            }
          }
        }
      }
    }
  }
}

/* im2col: copies 'input' into the unfolded buffer 'finput' so convolution
   can be expressed as a single matrix multiply. Rows of finput are indexed
   by (plane, kh, kw); parallel over all such rows. */
static void nn_(unfolded_copy)(THTensor *finput, THTensor *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
  long k;
  real *input_data = THTensor_(data)(input);
  real *finput_data = THTensor_(data)(finput);
#pragma omp parallel for private(k)
  for(k = 0; k < nInputPlane*kH*kW; k++) {
    int nip = k / (kH*kW);
    int rest = k % (kH*kW);
    int kh = rest / kW;
    int kw = rest % kW;
    int x,y,ix,iy;
    real *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth);
    real *src = input_data + nip*(inputHeight*inputWidth);
    if (padW > 0 || padH > 0) {
      int lpad,rpad;
      for(y = 0; y < outputHeight; y++) {
        iy = y*dH - padH + kh;
        if (iy < 0 || iy >= inputHeight) {
          /* row is entirely padding: zero-fill */
          memset(dst+y*outputWidth, 0, sizeof(real)*outputWidth);
        } else {
          if (dW==1){
             ix = 0 - padW + kw;
             lpad = fmaxf(0,padW-kw);
             rpad = fmaxf(0,padW-(kW-kw-1));
             if (outputWidth-rpad-lpad <= 0) {
                memset(dst+y*outputWidth, 0, sizeof(real)*outputWidth);
             } else {
                /* zero the padded fringes, bulk-copy the interior */
                if (lpad > 0) memset(dst+y*outputWidth, 0, sizeof(real)*lpad);
                memcpy(dst+y*outputWidth+lpad, src+iy*inputWidth+ix+lpad, sizeof(real)*(outputWidth-rpad-lpad));
                if (rpad > 0) memset(dst+y*outputWidth + outputWidth - rpad, 0, sizeof(real)*rpad);
             }
          }
          else{
            for (x=0; x<outputWidth; x++){
               ix = x*dW - padW + kw;
               if (ix < 0 || ix >= inputWidth)
                 memset(dst+y*outputWidth+x, 0, sizeof(real)*1);
               else
                 memcpy(dst+y*outputWidth+x, src+iy*inputWidth+ix, sizeof(real)*(1));
            }
          }
        }
      }
    } else {
      for(y = 0; y < outputHeight; y++) {
        iy = y*dH + kh;
        ix = 0 + kw;
        if (dW == 1)
           memcpy(dst+y*outputWidth, src+iy*inputWidth+ix, sizeof(real)*outputWidth);
        else{
          for (x=0; x<outputWidth; x++)
             memcpy(dst+y*outputWidth+x, src+iy*inputWidth+ix+x*dW, sizeof(real)*(1));
        }
      }
    }
  }
}

/* Forward pass for one sample: output = bias + weight * im2col(input). */
static void nn_(SpatialConvolutionMM_updateOutput_frame)(THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput,
                                                         int kW, int kH, int dW, int dH, int padW, int padH,
                                                         long nInputPlane, long inputWidth, long inputHeight,
                                                         long nOutputPlane, long outputWidth, long outputHeight)
{
  long i;
  THTensor *output2d;

  nn_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH, nInputPlane, inputWidth, inputHeight, outputWidth, outputHeight);

  /* view output as (nOutputPlane) x (outputHeight*outputWidth) */
  output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset,
                                         nOutputPlane, -1,
                                         outputHeight*outputWidth, -1);
  /* broadcast bias across every output plane */
  for(i = 0; i < nOutputPlane; i++)
    THVector_(fill)(output->storage->data+output->storageOffset+output->stride[0]*i, THTensor_(get1d)(bias, i), outputHeight*outputWidth);

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  THTensor_(free)(output2d);
}

/* Lua binding: forward pass. Handles 3D (single) and 4D (batch) input;
   batch samples are processed in parallel. Returns 1 (the output tensor
   is updated in the module's "output" field). */
static int nn_(SpatialConvolutionMM_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int padW = luaT_getfieldcheckint(L, 1, "padW");
  int padH = luaT_getfieldcheckint(L, 1, "padH");

  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  int dimf = 0;
  int dimw = 2;
  int dimh = 1;

  long nInputPlane;
  long inputWidth;
  long inputHeight;
  long nOutputPlane;
  long outputWidth;
  long outputHeight;

  luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");

  if (input->nDimension == 4) {
    /* shift dims right for batch mode: (batch, plane, h, w) */
    dimf++;
    dimw++;
    dimh++;
  }

  nInputPlane = input->size[dimf];
  inputWidth   = input->size[dimw];
  inputHeight  = input->size[dimh];
  nOutputPlane = weight->size[0];
  outputWidth  = (inputWidth + 2*padW - kW) / dW + 1;
  outputHeight = (inputHeight + 2*padH - kH) / dH + 1;

  if (outputWidth < 1 || outputHeight < 1)
    THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small",
            nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth);

  if(input->nDimension == 3)
  {
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

    nn_(SpatialConvolutionMM_updateOutput_frame)(input, output, weight, bias, finput,
                                                 kW, kH, dW, dH, padW, padH,
                                                 nInputPlane, inputWidth, inputHeight,
                                                 nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    long T = input->size[0];
    long t;

    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      nn_(SpatialConvolutionMM_updateOutput_frame)(input_t, output_t, weight, bias, finput_t,
                                                   kW, kH, dW, dH, padW, padH,
                                                   nInputPlane, inputWidth, inputHeight,
                                                   nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  return 1;
}

/* Backward pass for one sample: fgradInput = weight^T * gradOutput (as a
   matrix product), then col2im back into gradInput. Caller has already
   transposed 'weight'. */
static void nn_(SpatialConvolutionMM_updateGradInput_frame)(THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput,
                                                            int kW, int kH, int dW, int dH, int padW, int padH)
{
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset,
                                                       gradOutput->size[0], -1,
                                                       gradOutput->size[1]*gradOutput->size[2], -1);
  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  THTensor_(free)(gradOutput2d);

  THTensor_(zero)(gradInput);

  nn_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH, gradInput->size[0], gradInput->size[2], gradInput->size[1], gradOutput->size[2], gradOutput->size[1]);
}

/* Lua binding: gradient w.r.t. input. Temporarily transposes the weight
   matrix, processes 3D or 4D input, then restores the transpose. */
static int nn_(SpatialConvolutionMM_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  int padW = luaT_getfieldcheckint(L, 1, "padW");
  int padH = luaT_getfieldcheckint(L, 1, "padH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *fgradInput = luaT_getfieldcheckudata(L, 1, "fgradInput", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);
  /* transposed in place; restored after the frame calls below */
  THTensor_(transpose)(weight, weight, 0, 1);

  if(input->nDimension == 3)
  {
    nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput, weight, fgradInput, kW, kH, dW, dH, padW, padH);
  }
  else
  {
    long T = input->size[0];
    long t;

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t, weight, fgradInput_t, kW, kH, dW, dH, padW, padH);

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(transpose)(weight, weight, 0, 1);

  return 1;
}

/* Accumulates weight and bias gradients for one sample:
   gradWeight += scale * gradOutput * finput^T; gradBias += scale * row sums. */
static void nn_(SpatialConvolutionMM_accGradParameters_frame)(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput,
                                                              real scale)
{
  long i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset,
                                                       gradOutput->size[0], -1,
                                                       gradOutput->size[1]*gradOutput->size[2], -1);

  THTensor_(transpose)(finput, finput, 0, 1);
  THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, finput);
  THTensor_(transpose)(finput, finput, 0, 1);

  for(i = 0; i < gradBias->size[0]; i++)
  {
    long k;
    real sum = 0;
    real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
    for(k = 0; k < gradOutput2d->size[1]; k++)
      sum += data[k];
    (gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum;
  }

  THTensor_(free)(gradOutput2d);
}

/* Lua binding: parameter-gradient accumulation. Batch samples are processed
   serially -- gradWeight/gradBias are shared accumulators. Returns 0. */
static int nn_(SpatialConvolutionMM_accGradParameters)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  real scale = luaL_optnumber(L, 4, 1);
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor);
  THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor);

  THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" );

  if(input->nDimension == 3)
  {
    nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
  }
  else
  {
    long T = input->size[0];
    long t;

    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);

      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }

  return 0;
}

/* Method table registered under the module's "nn" metatable. */
static const struct luaL_Reg nn_(SpatialConvolutionMM__) [] = {
  {"SpatialConvolutionMM_updateOutput", nn_(SpatialConvolutionMM_updateOutput)},
  {"SpatialConvolutionMM_updateGradInput", nn_(SpatialConvolutionMM_updateGradInput)},
  {"SpatialConvolutionMM_accGradParameters", nn_(SpatialConvolutionMM_accGradParameters)},
  {NULL, NULL}
};

static void nn_(SpatialConvolutionMM_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(SpatialConvolutionMM__), "nn");
  lua_pop(L,1);
}

#endif
detector.c
#include "darknet.h"

/* COCO category ids indexed by the network's 80 class indices (COCO ids
   are sparse: some numbers are unused in the official annotation set). */
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};

/* Trains a detector on one or more GPUs. Loads the network once per GPU,
   streams training batches with a background loader thread, periodically
   resizes the network for multi-scale training (when l.random is set),
   and checkpoints weights into backup_directory. */
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
    list *options = read_data_cfg(datacfg);
    char *train_images = option_find_str(options, "train", "data/train.list");
    char *backup_directory = option_find_str(options, "backup", "/backup/");

    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    float avg_loss = -1;
    /* BUG FIX: this is an array of ngpus *pointers*, so the element size is
       sizeof(network *), not sizeof(network) (which over-allocated). */
    network **nets = calloc(ngpus, sizeof(network *));

    srand(time(0));
    int seed = rand();
    int i;
    for(i = 0; i < ngpus; ++i){
        srand(seed);  /* same seed so all replicas start identically */
#ifdef GPU
        cuda_set_device(gpus[i]);
#endif
        nets[i] = load_network(cfgfile, weightfile, clear);
        nets[i]->learning_rate *= ngpus;
    }
    srand(time(0));
    network *net = nets[0];

    int imgs = net->batch * net->subdivisions * ngpus;
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    data train, buffer;

    layer l = net->layers[net->n - 1];

    int classes = l.classes;
    float jitter = l.jitter;

    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = get_base_args(net);
    args.coords = l.coords;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.classes = classes;
    args.jitter = jitter;
    args.num_boxes = l.max_boxes;
    args.d = &buffer;
    args.type = DETECTION_DATA;
    //args.type = INSTANCE_DATA;
    args.threads = 64;

    pthread_t load_thread = load_data(args);
    double time;
    int count = 0;
    //while(i*imgs < N*120){
    while(get_current_batch(net) < net->max_batches){
        if(l.random && count++%10 == 0){
            /* multi-scale training: pick a new square input size every
               10 resize checks; lock it to 608 near the end of training */
            printf("Resizing\n");
            int dim = (rand() % 10 + 10) * 32;
            if (get_current_batch(net)+200 > net->max_batches) dim = 608;
            //int dim = (rand() % 4 + 16) * 32;
            printf("%d\n", dim);
            args.w = dim;
            args.h = dim;

            /* drain the in-flight batch (loaded at the old size) and
               restart the loader at the new size */
            pthread_join(load_thread, 0);
            train = buffer;
            free_data(train);
            load_thread = load_data(args);

            #pragma omp parallel for
            for(i = 0; i < ngpus; ++i){
                resize_network(nets[i], dim, dim);
            }
            net = nets[0];
        }
        time=what_time_is_it_now();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data(args);

        /*
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[10] + 1 + k*5);
           if(!b.x) break;
           printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
           }
         */
        /*
           int zz;
           for(zz = 0; zz < train.X.cols; ++zz){
           image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
           int k;
           for(k = 0; k < l.max_boxes; ++k){
           box b = float_to_box(train.y.vals[zz] + k*5, 1);
           printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
           draw_bbox(im, b, 1, 1,0,0);
           }
           show_image(im, "truth11");
           cvWaitKey(0);
           save_image(im, "truth11");
           }
         */

        printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);

        time=what_time_is_it_now();
        float loss = 0;
#ifdef GPU
        if(ngpus == 1){
            loss = train_network(net, train);
        } else {
            loss = train_networks(nets, ngpus, train, 4);
        }
#else
        loss = train_network(net, train);
#endif
        if (avg_loss < 0) avg_loss = loss;
        avg_loss = avg_loss*.9 + loss*.1;  /* exponential moving average */

        i = get_current_batch(net);
        printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
        if(i%100==0){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
        if(i%10000==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
            if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
        }
        free_data(train);
    }
#ifdef GPU
    if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
}

/* Extracts the numeric COCO image id from a path: digits after the last
   '_' if present, otherwise after the last '/'. */
static int get_coco_image_id(char *filename)
{
    char *p = strrchr(filename, '/');
    char *c = strrchr(filename, '_');
    if(c) p = c;
    return atoi(p+1);
}

/* Writes detections for one image as COCO-format JSON result lines
   (bbox as [x, y, width, height] in pixels, clipped to the image). */
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
    int i, j;
    int image_id = get_coco_image_id(image_path);
    for(i = 0; i < num_boxes; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        float bx = xmin;
        float by = ymin;
        float bw = xmax - xmin;
        float bh = ymax - ymin;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
        }
    }
}

/* Writes detections in Pascal VOC submission format, one file per class
   (fps[j]); coordinates are 1-based per the VOC convention. */
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;

        if (xmin < 1) xmin = 1;
        if (ymin < 1) ymin = 1;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
                    xmin, ymin, xmax, ymax);
        }
    }
}

/* Writes detections in ImageNet DET submission format (single file,
   1-based class ids). */
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
    int i, j;
    for(i = 0; i < total; ++i){
        float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
        float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
        float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
        float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;

        if (xmin < 0) xmin = 0;
        if (ymin < 0) ymin = 0;
        if (xmax > w) xmax = w;
        if (ymax > h) ymax = h;

        for(j = 0; j < classes; ++j){
            int class = j;
            if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
                    xmin, ymin, xmax, ymax);
        }
    }
}

/* Validation with horizontal-flip test-time augmentation: batch 2 holds
   the image and its mirror; detections are averaged by the network-boxes
   machinery. Output format (COCO / ImageNet / VOC) is chosen by the
   "eval" key in the data cfg. */
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 2);  /* slot 0: image, slot 1: flipped image */
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    /* pipelined loading: nthreads images are decoded in the background
       while the previous nthreads are being evaluated */
    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    image input = make_image(net->w, net->h, net->c*2);

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            /* pack image and its mirror into one 2-sample batch */
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
            flip_image(val_resized[t]);
            copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);

            network_predict(net, input.data);
            int w = val[t].w;
            int h = val[t].h;
            int num = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
            if (nms) do_nms_sort(dets, num, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, num, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, num, classes, w, h);
            }
            free_detections(dets, num);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        fseek(fp, -2, SEEK_CUR);  /* drop trailing ",\n" before closing the array */
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

/* Standard (no-flip) validation run; same structure and output formats as
   validate_detector_flip but with batch size 1. */
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
    int j;
    list *options = read_data_cfg(datacfg);
    char *valid_images = option_find_str(options, "valid", "data/train.list");
    char *name_list = option_find_str(options, "names", "data/names.list");
    char *prefix = option_find_str(options, "results", "results");
    char **names = get_labels(name_list);
    char *mapf = option_find_str(options, "map", 0);
    int *map = 0;
    if (mapf) map = read_map(mapf);

    network *net = load_network(cfgfile, weightfile, 0);
    set_batch_network(net, 1);
    fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
    srand(time(0));

    list *plist = get_paths(valid_images);
    char **paths = (char **)list_to_array(plist);

    layer l = net->layers[net->n-1];
    int classes = l.classes;

    char buff[1024];
    char *type = option_find_str(options, "eval", "voc");
    FILE *fp = 0;
    FILE **fps = 0;
    int coco = 0;
    int imagenet = 0;
    if(0==strcmp(type, "coco")){
        if(!outfile) outfile = "coco_results";
        snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
        fp = fopen(buff, "w");
        fprintf(fp, "[\n");
        coco = 1;
    } else if(0==strcmp(type, "imagenet")){
        if(!outfile) outfile = "imagenet-detection";
        snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
        fp = fopen(buff, "w");
        imagenet = 1;
        classes = 200;
    } else {
        if(!outfile) outfile = "comp4_det_test_";
        fps = calloc(classes, sizeof(FILE *));
        for(j = 0; j < classes; ++j){
            snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
            fps[j] = fopen(buff, "w");
        }
    }

    int m = plist->size;
    int i=0;
    int t;

    float thresh = .005;
    float nms = .45;

    int nthreads = 4;
    image *val = calloc(nthreads, sizeof(image));
    image *val_resized = calloc(nthreads, sizeof(image));
    image *buf = calloc(nthreads, sizeof(image));
    image *buf_resized = calloc(nthreads, sizeof(image));
    pthread_t *thr = calloc(nthreads, sizeof(pthread_t));

    load_args args = {0};
    args.w = net->w;
    args.h = net->h;
    //args.type = IMAGE_DATA;
    args.type = LETTERBOX_DATA;

    for(t = 0; t < nthreads; ++t){
        args.path = paths[i+t];
        args.im = &buf[t];
        args.resized = &buf_resized[t];
        thr[t] = load_data_in_thread(args);
    }
    double start = what_time_is_it_now();
    for(i = nthreads; i < m+nthreads; i += nthreads){
        fprintf(stderr, "%d\n", i);
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            pthread_join(thr[t], 0);
            val[t] = buf[t];
            val_resized[t] = buf_resized[t];
        }
        for(t = 0; t < nthreads && i+t < m; ++t){
            args.path = paths[i+t];
            args.im = &buf[t];
            args.resized = &buf_resized[t];
            thr[t] = load_data_in_thread(args);
        }
        for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
            char *path = paths[i+t-nthreads];
            char *id = basecfg(path);
            float *X = val_resized[t].data;
            network_predict(net, X);
            int w = val[t].w;
            int h = val[t].h;
            int nboxes = 0;
            detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
            if (nms) do_nms_sort(dets, nboxes, classes, nms);
            if (coco){
                print_cocos(fp, path, dets, nboxes, classes, w, h);
            } else if (imagenet){
                print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
            } else {
                print_detector_detections(fps, id, dets, nboxes, classes, w, h);
            }
            free_detections(dets, nboxes);
            free(id);
            free_image(val[t]);
            free_image(val_resized[t]);
        }
    }
    for(j = 0; j < classes; ++j){
        if(fps) fclose(fps[j]);
    }
    if(coco){
        fseek(fp, -2, SEEK_CUR);  /* drop trailing ",\n" before closing the array */
        fprintf(fp, "\n]\n");
        fclose(fp);
    }
    fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}

void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths("data/coco_val_5k.list"); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int j, k; int m = plist->size; int i=0; float thresh = .001; float iou_thresh = .5; float nms = .4; int total = 0; int correct = 0; int proposals = 0; float avg_iou = 0; for(i = 0; i < m; ++i){ char *path = paths[i]; image orig = load_image_color(path, 0, 0); image sized = resize_image(orig, net->w, net->h); char *id = basecfg(path); network_predict(net, sized.data); int nboxes = 0; detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes); if (nms) do_nms_obj(dets, nboxes, 1, nms); char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); for(k = 0; k < nboxes; ++k){ if(dets[k].objectness > thresh){ ++proposals; } } for (j = 0; j < num_labels; ++j) { ++total; box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h}; float best_iou = 0; for(k = 0; k < l.w*l.h*l.n; ++k){ float iou = box_iou(dets[k].bbox, t); if(dets[k].objectness > thresh && iou > best_iou){ best_iou = iou; } } avg_iou += best_iou; if(best_iou > iou_thresh){ ++correct; } } fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total); free(id); free_image(orig); free_image(sized); } } void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) { list *options = read_data_cfg(datacfg); char *name_list = 
option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); image **alphabet = load_alphabet(); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); double time; char buff[256]; char *input = buff; float nms=.45; while(1){ if(filename){ strncpy(input, filename, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if(!input) return; strtok(input, "\n"); } image im = load_image_color(input,0,0); image sized = letterbox_image(im, net->w, net->h); //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n-1]; float *X = sized.data; time=what_time_is_it_now(); network_predict(net, X); printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time); int nboxes = 0; detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); //printf("%d\n", nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes); free_detections(dets, nboxes); if(outfile){ save_image(im, outfile); } else{ save_image(im, "predictions"); #ifdef OPENCV cvNamedWindow("predictions", CV_WINDOW_NORMAL); if(fullscreen){ cvSetWindowProperty("predictions", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN); } show_image(im, "predictions", 0); #endif } free_image(im); free_image(sized); if (filename) break; } } /* void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; 
int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; float nms = .45; while(1){ image in = get_image_from_stream(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; float *X = in_s.data; network_predict(net, X); int nboxes = 0; detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int left = b.x-b.w/2.; int top = b.y-b.h/2.; censor_image(in, left, top, b.w, b.h); } } show_image(in, base); cvWaitKey(10); free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream(cap); free_image(in); } } #endif } void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; int count = 0; float nms = .45; while(1){ image in = 
get_image_from_stream(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; show_image(in, base); int nboxes = 0; float *X = in_s.data; network_predict(net, X); detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h; int dx = b.x*in.w-size/2.; int dy = b.y*in.h-size/2.; image bim = crop_image(in, dx, dy, size, size); char buff[2048]; sprintf(buff, "results/extract/%07d", count); ++count; save_image(bim, buff); free_image(bim); } } free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream(cap); free_image(in); } } #endif } */ /* void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets) { network_predict_image(net, im); layer l = net->layers[net->n-1]; int nboxes = num_boxes(net); fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets); if (nms) do_nms_sort(dets, nboxes, l.classes, nms); } */ void run_detector(int argc, char **argv) { char *prefix = find_char_arg(argc, argv, "-prefix", 0); float thresh = find_float_arg(argc, argv, "-thresh", .5); float hier_thresh = find_float_arg(argc, argv, "-hier", .5); int cam_index = find_int_arg(argc, argv, "-c", 0); int frame_skip = find_int_arg(argc, argv, "-s", 0); int avg = find_int_arg(argc, argv, "-avg", 3); if(argc < 4){ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); return; } char *gpu_list = find_char_arg(argc, argv, "-gpus", 0); char *outfile = find_char_arg(argc, argv, "-out", 0); int *gpus = 0; int gpu = 0; int ngpus = 0; if(gpu_list){ 
printf("%s\n", gpu_list); int len = strlen(gpu_list); ngpus = 1; int i; for(i = 0; i < len; ++i){ if (gpu_list[i] == ',') ++ngpus; } gpus = calloc(ngpus, sizeof(int)); for(i = 0; i < ngpus; ++i){ gpus[i] = atoi(gpu_list); gpu_list = strchr(gpu_list, ',')+1; } } else { gpu = gpu_index; gpus = &gpu; ngpus = 1; } int clear = find_arg(argc, argv, "-clear"); int fullscreen = find_arg(argc, argv, "-fullscreen"); int width = find_int_arg(argc, argv, "-w", 0); int height = find_int_arg(argc, argv, "-h", 0); int fps = find_int_arg(argc, argv, "-fps", 0); //int class = find_int_arg(argc, argv, "-class", 0); char *datacfg = argv[3]; char *cfg = argv[4]; char *weights = (argc > 5) ? argv[5] : 0; char *filename = (argc > 6) ? argv[6]: 0; if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear); else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights); else if(0==strcmp(argv[2], "demo")) { list *options = read_data_cfg(datacfg); int classes = option_find_int(options, "classes", 20); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen); } //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); }
singlenode_spmspv3.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SINGLENODE_SPMSPV3_H_ #define SRC_SINGLENODE_SPMSPV3_H_ #include <xmmintrin.h> #include "src/bitvector.h" template <typename Ta, typename Tx, typename Tvp, typename Ty> void my_spmspv3(int* row_inds, int* col_ptrs, int* col_indices, Ta* vals, int num_partitions, int* row_pointers, int* col_starts, int* edge_pointers, Tx* xvalue, int * xbit_vector, Tvp * vpvalue, int * vpbit_vector, Ty * yvalue, int * ybit_vector, int m, int n, int* nnz, Ty (*op_mul)(Ta, Tx, Tvp), Ty (*op_add)(Ty, Ty)) { #pragma omp parallel for schedule(dynamic, 1) for (int p = 0; p < num_partitions; p++) { // For each column const int* column_offset = col_indices + col_starts[p]; const int* partitioned_row_offset = row_inds + edge_pointers[p]; const Ta* partitioned_val_offset = vals + edge_pointers[p]; const int* col_ptrs_cur = col_ptrs + col_starts[p]; for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1; j++) { int col_index = col_indices[col_starts[p] + j]; if(get_bitvector(col_index, xbit_vector)) { Tx Xval = xvalue[col_index]; Tvp VPVal = vpvalue[col_index]; assert(get_bitvector(col_index, vpbit_vector)); _mm_prefetch((char*)(xvalue + column_offset[j + 4]), _MM_HINT_T0); int nz_idx = col_ptrs_cur[j]; for (; nz_idx < col_ptrs_cur[j + 1]; nz_idx++) { int row_ind = partitioned_row_offset[nz_idx]; Ta Aval = partitioned_val_offset[nz_idx]; if(get_bitvector(row_ind, ybit_vector)) { yvalue[row_ind] = op_add(yvalue[row_ind], op_mul(Aval, Xval, VPVal)); } else { yvalue[row_ind] = op_mul(Aval, Xval, VPVal); set_bitvector(row_ind, ybit_vector); } } } } } for (int p = 0; p < num_partitions; p++) { // nnz += new_nnz[p]; } *nnz = m * n; } #endif // SRC_SINGLENODE_SPMSPV3_H_
APPROX2.h
#ifndef APPROX2_H
#define APPROX2_H
#include <string>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <stdio.h> /* printf */
#include <time.h>
#include <fstream>
#include <algorithm>
#include <iomanip>
#include <ctime>
#include <math.h>
#include <omp.h>

//This class implements the method APPROX presented in the paper Accelerated, parallel and proximal coordinate descent, by Fercoq & Richtarik.
/* The optimization problem to solve is: min f(x)+ P(x) */
// Template parameters:
//   L - integer type for indices and counters
//   D - floating-point type for values
// A derived class supplies the problem-specific pieces by overriding the
// virtual hooks below (partial derivative of f, proximal step, primal/dual
// evaluation, the Lipschitz vector v and the sampling probabilities p).
// NOTE(review): n, sumofLi, proba_vector and max_p are expected to be set by
// the derived class before initialize()/APPROX_MU() are called -- confirm
// against the concrete subclasses.
template<typename L, typename D>
class APPROX2
{
protected:
    // parameters
    D mu_f;              // strong convexity parameter of f
    D mu_psi;            // strong convexity parameter of the regularizer P (psi)
    L n;                 // x\in \R^n
    L tau;               //number of threads on each node/computer
    D sumofLi;           // sum of coordinate Lipschitz constants (used by the full prox-grad step)
    // variables
    std::vector<D> u;    // APPROX auxiliary sequence; x is recovered as x = gamma*u + z
    std::vector<D> z;
    std::vector<D> x;
    std::vector<D> v;    // per-coordinate Lipschitz constants (ESO parameters), filled by set_v()
    std::vector<D> t;    // per-coordinate prox-step results of the current iteration
    D gamma;             // scaling factor linking u and z to x
    D theta;             // momentum/step parameter, updated each outer iteration
    // sampling variables
    std::vector<D> proba_vector;  // target sampling probabilities (used only when tau==1)
    std::vector<D> S;             // currently sampled coordinate set, |S| = tau
    std::vector<D> all_n;         // the full index set {0,...,n-1}, used when tau >= n
    std::vector<D> sampled;       // 0/1 marker to sample without replacement
    D max_p;                      // max of proba_vector, for rejection sampling
    // auxiliary variables (precomputed ratios of n and tau)
    D tauovern;          // tau/n
    D novertau;          // n/tau
    D novertau2;         // (n/tau)^2
    D novertau3;         // (n/tau)^3
    D novertau4;         // (n/tau)^4
    D taumuovern;        // tau*mu_f/n
    // variables for print
    D primal_value;
    D dual_value;
    D gradient_norm;
    D subgradient;
    D epsilon;
    L max_nb_loops;
    L evaluation;        // reporting mode: 1 = primal/dual gap, 2 = gradient norm, 3 = ALM subproblem
    D delta;             //primal dual gap
    D delta2;            //distance to subgradient
    ofstream samp;       // log file stream for the recorded metrics
    L nb_iters;          // number of completed epochs
    L nb_of_iters_per_loop;  // inner iterations per epoch, ~ n/tau
    D running_time;      // accumulated wall-clock (or CPU) time, excludes evaluation
    L print_every_N;     // record metrics every N epochs
    L mod;               // if 1, theta is updated by the strongly-convex recurrence
public:
    gsl_rng * rng;       // GSL random generator used for coordinate sampling

    // Problem-specific hooks; the defaults are inert placeholders.
    // NOTE(review): D(NULL) is a cast of the null constant to D (i.e. zero) --
    // derived classes are expected to override these.
    virtual inline D partial_i_of_f(L i){return D(NULL);}   // i-th partial derivative of f at the current point
    virtual inline D compute_prox(D, D, D, L){return D(NULL);}  // prox step: (gradient, Lipschitz const, center, coordinate)
    virtual inline void compute_primal_value() {}
    virtual inline void compute_dual_value(){}
    virtual inline void compute_gradient_norm(){}
    virtual inline void set_v(){}   // fill v with the coordinate Lipschitz constants
    virtual inline void set_p(){}   // fill proba_vector / max_p
    virtual inline void update_z_coordinate( L, D){}  // z[i] += given increment (plus problem bookkeeping)
    virtual inline void update_u_coordinate( L, D){}  // u[i] += given increment (plus problem bookkeeping)
    virtual inline void update_x_coordinate( L, D){}  // x[i] += given increment (plus problem bookkeeping)

    APPROX2()
    {
    }

    // Seed the GSL generator from the wall clock (non-reproducible runs;
    // the commented line gives a fixed seed for debugging).
    void set_rng()
    {
        gsl_rng_env_setup();
        const gsl_rng_type * T;
        T = gsl_rng_default;
        rng = gsl_rng_alloc(T);
        gsl_rng_set(rng,time(NULL));
        //gsl_rng_set(rng, 27432042);
    }

    // sample i with probability pi=proba_vector[i]
    // NOTE(review): the rejection step against proba_vector/max_p only runs
    // when tau==1; for tau>1 the draw is uniform over {0,...,n-1} -- confirm
    // this is intended.
    L sampling()
    {
        //L i=(floor)(gsl_rng_uniform(rng)*n);
        L i=gsl_rng_uniform_int(rng, n);
        if(tau==1)
        {
            D y=gsl_rng_uniform(rng);
            while(y*max_p>proba_vector[i])
            {
                i=(floor)(gsl_rng_uniform(rng)*n);
                y=gsl_rng_uniform(rng);
            }
        }
        return i;
    }

    // sample S
    // Draws tau distinct coordinates into S (without replacement, using the
    // 'sampled' markers); when tau >= n the whole index set is used.
    void batch_sampling()
    {
        if(tau<n)
        {
            L i=sampling();
            for(L k=0;k<tau;k++)
            {
                while(sampled[i]==1)
                {
                    i=sampling();
                }
                sampled[i]=1;
                S[k]=i;
            }
            // reset the markers for the next call
            for(L k=0;k<tau;k++)
            {
                sampled[S[k]]=0;
            }
        }
        else
        {
            S=all_n;
            //cout<<"s=all_n"<<endl;
        }
    }

    // Materialize the iterate x from the (u, z) representation: x = gamma*u + z.
    void compute_x()
    {
        for(L i=0;i<n;i++)
            x[i]=gamma*u[i]+z[i];
    }

    // Update the momentum parameter theta.  When mod==1, theta is the positive
    // root of the strongly-convex APPROX recurrence (quadratic in the new
    // theta, involving mu_f, mu_psi and n/tau); otherwise theta is left fixed.
    void update_theta()
    {
        if(mod==1){
            D theta2=theta*theta;
            D tmp=mu_f+mu_psi-theta2*novertau2-mu_psi*(theta+1)*novertau;
            D tmp2=theta2*novertau4+theta*mu_psi*novertau3;
            D tmp3=mu_f+mu_psi-novertau2*theta2-novertau*mu_psi*(theta+1);
            theta=0.5/novertau2*(sqrt(tmp*tmp+4*tmp2)+tmp3);
        }
    }

    // Set up all state for a run starting from x0:
    //   x0      - initial point (copied into both z and x; u starts at 0)
    //   val_tau - minibatch size tau
    //   val_mu_f, val_mu_psi - strong convexity parameters
    //   eval    - reporting mode (see 'evaluation' member)
    //   p_N     - record metrics every p_N epochs
    //   val_mod - theta update mode (see 'mod' member)
    // Also calls the derived-class hooks set_v(), set_p() and seeds the RNG.
    void initialize(vector<D> & x0, L val_tau, D val_mu_f, D val_mu_psi, L eval, L p_N, L val_mod)
    {
        tau=val_tau;
        theta=tau/(n+0.0);
        tauovern=tau/(n+0.0);
        novertau=n/(tau+0.0);
        novertau2=novertau*novertau;
        novertau3=novertau2*novertau;
        novertau4=novertau2*novertau2;
        all_n.resize(n,0);
        for(L i=0;i<n;i++)
            all_n[i]=i;
        gamma=1;
        mu_f=val_mu_f;
        mu_psi=val_mu_psi;
        evaluation=eval;
        // one "epoch" makes ~n coordinate updates: n/tau inner iterations
        nb_of_iters_per_loop=floor(max(1.,n/(tau+0.0)));
        print_every_N=p_N;
        mod=val_mod;
        taumuovern=tau*mu_f/(n+0.0);
        delta=std::numeric_limits<double>::max();
        gradient_norm=std::numeric_limits<double>::max();
        u.clear();
        u.resize(n,0);
        z.clear();
        z.resize(n,0);
        x.clear();
        x.resize(n,0);
        for(L i=0;i<n;i++)
        {
            z[i]=x0[i];
            x[i]=x0[i];
        }
        sampled.clear();
        sampled.resize(n,0);
        S.clear();
        S.resize(tau,0);
        t.clear();
        t.resize(n,0);
        set_v();
        set_p();
        set_rng();
        if(evaluation==1)
        {
            compute_primal_value();
            compute_dual_value();
            cout<<"initial primal value="<<primal_value<<endl;
            cout<<"initial dual value="<<dual_value<<endl;
        }
        else if(evaluation==2)
        {
            compute_primal_value();
            compute_gradient_norm();
            cout<<"initial primal value="<<primal_value<<endl;
            cout<<"initial gradient norm="<<gradient_norm<<endl;
        }
        running_time=0;
        cout<<"finished APPROX initializing"<<endl;
    }

    // Record progress metrics (to stdout and the 'samp' log stream) every
    // print_every_N epochs, according to the 'evaluation' mode.
    void compute_and_record_result()
    {
        if(evaluation==1&&nb_iters%print_every_N==0)
        {
            compute_primal_value();
            compute_dual_value();
            compute_gradient_norm();
            delta=primal_value-dual_value;
            cout<<setprecision(9)<<(0.0+nb_iters)<<"; "<<running_time<<"; primal dual gap="<<delta<<" primal value="<<primal_value<<"; dual value="<<dual_value<<endl;
            samp<<(0.0+nb_iters)<<" "<<delta<<" "<<running_time<<" "<<primal_value<<" "<<dual_value<<" "<<gradient_norm<<endl;
            gradient_norm=std::numeric_limits<double>::max();
        }
        else if(evaluation==2&&nb_iters%print_every_N==0)
        {
            compute_primal_value();
            compute_gradient_norm();
            cout<<setprecision(9)<<(0.0+nb_iters)<<"; "<<running_time<<"; gradient norm="<<gradient_norm<<" primal value="<<primal_value<<endl;
            samp<<(0.0+nb_iters)<<" "<<gradient_norm<<" "<<running_time<<" "<<primal_value<<endl;
        }
        else if(evaluation==3&&nb_iters%print_every_N==0) //running APPROX for solving the subproblem in ALM
        {
            compute_primal_value();
            compute_dual_value();
            compute_gradient_norm();
            delta=primal_value-dual_value;
            cout<<setprecision(9)<<" "<<(0.0+nb_iters)<<"; "<<running_time<<"; primal dual gap="<<delta<<" primal value="<<primal_value<<"; dual value="<<dual_value<<"; gradient_norm="<<gradient_norm<<endl;
            samp<<(0.0+nb_iters)<<" "<<delta<<" "<<running_time<<" "<<primal_value<<" "<<dual_value<<" "<<gradient_norm<<endl;
            gradient_norm=std::numeric_limits<double>::max();
        }
    }

    // Same as compute_and_record_result() but unconditionally (used once at
    // the end of a run so the final state is always logged).
    void compute_and_record_result_always(){
        if(evaluation==1)
        {
            compute_primal_value();
            compute_dual_value();
            compute_gradient_norm();
            delta=primal_value-dual_value;
            cout<<setprecision(9)<<(0.0+nb_iters)<<"; "<<running_time<<"; primal dual gap="<<delta<<" primal value="<<primal_value<<"; dual value="<<dual_value<<endl;
            samp<<(0.0+nb_iters)<<" "<<delta<<" "<<running_time<<" "<<primal_value<<" "<<dual_value<<" "<<gradient_norm<<endl;
            gradient_norm=std::numeric_limits<double>::max();
        }
        else if(evaluation==2)
        {
            compute_primal_value();
            compute_gradient_norm();
            cout<<setprecision(9)<<(0.0+nb_iters)<<"; "<<running_time<<"; gradient norm="<<gradient_norm<<" primal value="<<primal_value<<endl;
            samp<<(0.0+nb_iters)<<" "<<gradient_norm<<" "<<running_time<<" "<<primal_value<<endl;
        }
        else if(evaluation==3) //running APPROX for solving the subproblem in ALM
        {
            compute_primal_value();
            compute_dual_value();
            compute_gradient_norm();
            delta=primal_value-dual_value;
            cout<<setprecision(9)<<" "<<(0.0+nb_iters)<<"; "<<running_time<<"; primal dual gap="<<delta<<" primal value="<<primal_value<<"; dual value="<<dual_value<<"; gradient_norm="<<gradient_norm<<endl;
            samp<<(0.0+nb_iters)<<" "<<delta<<" "<<running_time<<" "<<primal_value<<" "<<dual_value<<" "<<gradient_norm<<endl;
            gradient_norm=std::numeric_limits<double>::max();
        }
    }

    // Main driver: run APPROX with strong-convexity support until the primal
    // dual gap drops below eps or max_nb_epoch epochs elapse.  Each inner
    // iteration samples a minibatch S, takes a prox step on each sampled
    // coordinate, then applies the coupled (u, z) updates.  Timing uses
    // omp_get_wtime() and excludes the evaluation/logging calls.
    void APPROX_MU(vector<D> & x0, L val_tau, D val_mu_f, D val_mu_psi, L eval, L p_N, L max_nb_epoch, D eps, string filename, L val_mod)
    {
        initialize(x0, val_tau, val_mu_f, val_mu_psi, eval, p_N, val_mod);
        cout<<"running APPROX MU"<<" ; "<<filename<<" max_nb_epoch "<<max_nb_epoch<<"; eps="<<eps<<endl;
        nb_iters=0;
        compute_and_record_result();
        //srand48(27432042);
        srand(time(NULL));
        D start;
        while(delta>eps && nb_iters<max_nb_epoch)
        {
            //start = std::clock();
            start=omp_get_wtime();
            for(L it=0;it<nb_of_iters_per_loop;it++)
            {
                gamma*=(1-theta)/(1-taumuovern);
                if(theta==1)
                    gamma=1;
                batch_sampling();
                //#pragma omp parallel
                {
                    //#pragma omp for
                    // prox step for each sampled coordinate; si is the prox
                    // center in the (u, z) representation
                    for(L it_S=0;it_S<tau;it_S++)
                    {
                        L i=S[it_S];
                        D si=z[i]+taumuovern*gamma/theta*u[i];
                        D gi=partial_i_of_f(i);
                        D Li=v[i]*novertau*theta;
                        t[i]=compute_prox(gi, Li, si,i);
                    }
                }
                //#pragma omp parallel for
                // apply the coupled updates to z and u
                for(L it_S=0;it_S<tau;it_S++)
                {
                    L i=S[it_S];
                    D ti=t[i];
                    update_z_coordinate(i, ti+taumuovern*gamma/theta*u[i]);
                    update_u_coordinate(i, (-1+novertau*theta)/gamma*ti-taumuovern/theta*u[i]);
                }
                update_theta();
            }
            //running_time+=( std::clock() - start ) / (double) CLOCKS_PER_SEC;
            running_time+=omp_get_wtime()-start;
            nb_iters++;
            compute_and_record_result();
        }
        compute_and_record_result_always();
    }

    // Sequential variant of APPROX_MU: same coordinate updates, but runs for a
    // fixed number of epochs (no eps stopping test) and times with std::clock
    // (CPU time) instead of omp_get_wtime.
    void APPROX_MU2(vector<D> & x0, L val_tau, D val_mu_f, D val_mu_psi, L eval, L p_N, L max_nb_epoch, D eps, string filename, L val_mod)
    {
        initialize(x0, val_tau, val_mu_f, val_mu_psi, eval, p_N, val_mod);
        cout<<"running APPROX MU"<<" ; "<<filename<<" max_nb_epoch "<<max_nb_epoch<<"; eps="<<eps<<endl;
        nb_iters=0;
        compute_and_record_result();
        //srand48(27432042);
        srand(time(NULL));
        D start;
        while(nb_iters<max_nb_epoch)
        {
            start = std::clock();
            D ti=0;
            D si=0;
            D gi=0;
            D Li=0;
            L i=0;
            for(L it=0;it<nb_of_iters_per_loop;it++)
            {
                gamma*=(1-theta)/(1-taumuovern);
                if(theta==1)
                    gamma=1;
                batch_sampling();
                for(L it_S=0;it_S<tau;it_S++)
                {
                    i=S[it_S];
                    si=z[i]+taumuovern*gamma/theta*u[i];
                    gi=partial_i_of_f(i);
                    Li=v[i]*novertau*theta;
                    t[i]=compute_prox(gi, Li, si,i);
                }
                for(L it_S=0;it_S<tau;it_S++)
                {
                    i=S[it_S];
                    ti=t[i];
                    update_z_coordinate(i, ti+taumuovern*gamma/theta*u[i]);
                    update_u_coordinate(i, (-1+novertau*theta)/gamma*ti-taumuovern/theta*u[i]);
                }
                update_theta();
            }
            running_time+=( std::clock() - start ) / (double) CLOCKS_PER_SEC;
            nb_iters++;
            compute_and_record_result();
        }
        compute_and_record_result_always();
    }

    // One full (deterministic) proximal-gradient step on every coordinate,
    // using the global constant sumofLi; updates x in place via the hook.
    void prox_grad_step(){
        D gi;
        for(L i=0;i<n;i++){
            gi=partial_i_of_f(i);
            t[i]=compute_prox(gi, sumofLi, x[i],i);
        }
        for(L i=0;i<n;i++){
            update_x_coordinate(i, t[i]);
        }
    }

    // Compute the prox-gradient mapping T(x) into Tx without modifying x.
    void do_single_step_prox(vector<D> & Tx){
        for(L i=0;i<n;i++){
            D gi=partial_i_of_f(i);
            Tx[i]=compute_prox(gi, sumofLi, x[i],i)+x[i];
        }
    }
};
#endif
convolution_winograd_transform.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); const float bias0 = biasptr ? 
biasptr[p] : 0.f; float tmp[6][8]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 1; const float* output0_tm_1 = output0_tm_0 + tiles * 1; const float* output0_tm_2 = output0_tm_0 + tiles * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 7; // TODO sse optimize for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 8; output0_tm_1 += tiles * 8; output0_tm_2 += tiles * 8; output0_tm_3 += tiles * 8; output0_tm_4 += tiles * 8; output0_tm_5 += tiles * 8; output0_tm_6 += tiles * 8; output0_tm_7 += tiles * 8; } float* output0 = out0.row(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + 
tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } static void conv3x3s1_winograd43_transform_input_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 4; const int h_tiles = (h - 2) / 4; const int tiles = w_tiles * h_tiles; // const float itm[6][6] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[6][6]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* r0 = img0.row(i * 4) + (j * 4); for (int m = 0; m < 6; m++) { float r00 = r0[0]; float r01 = r0[1]; float r02 = r0[2]; float r03 = r0[3]; float r04 = r0[4]; float r05 = r0[5]; float tmp0m = 4 * r00 - 5 * r02 + r04; float tmp1m = -4 * (r01 + r02) + r04 + r03; float tmp2m = 4 * (r01 - r02) + r04 - r03; float tmp3m = -2 * (r01 - r03) + r04 - r02; float tmp4m = 2 * (r01 - r03) + r04 - r02; float tmp5m = 4 * r01 - 5 * r03 + r05; tmp[0][m] = tmp0m; tmp[1][m] = tmp1m; tmp[2][m] = tmp2m; tmp[3][m] = tmp3m; tmp[4][m] = tmp4m; tmp[5][m] = tmp5m; r0 += w; } float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j); float* r0_tm_1 = r0_tm_0 + tiles; float* r0_tm_2 = r0_tm_0 + tiles * 2; float* r0_tm_3 = r0_tm_0 + tiles * 3; float* 
r0_tm_4 = r0_tm_0 + tiles * 4; float* r0_tm_5 = r0_tm_0 + tiles * 5; for (int m = 0; m < 6; m++) { float tmp00 = tmp[m][0]; float tmp01 = tmp[m][1]; float tmp02 = tmp[m][2]; float tmp03 = tmp[m][3]; float tmp04 = tmp[m][4]; float tmp05 = tmp[m][5]; float r0tm0 = 4 * tmp00 - 5 * tmp02 + tmp04; float r0tm1 = -4 * (tmp01 + tmp02) + tmp04 + tmp03; float r0tm2 = 4 * (tmp01 - tmp02) + tmp04 - tmp03; float r0tm3 = -2 * (tmp01 - tmp03) + tmp04 - tmp02; float r0tm4 = 2 * (tmp01 - tmp03) + tmp04 - tmp02; float r0tm5 = 4 * tmp01 - 5 * tmp03 + tmp05; r0_tm_0[0] = r0tm0; r0_tm_1[0] = r0tm1; r0_tm_2[0] = r0tm2; r0_tm_3[0] = r0tm3; r0_tm_4[0] = r0tm4; r0_tm_5[0] = r0tm5; r0_tm_0 += tiles * 6; r0_tm_1 += tiles * 6; r0_tm_2 += tiles * 6; r0_tm_3 += tiles * 6; r0_tm_4 += tiles * 6; r0_tm_5 += tiles * 6; } } } } } static void conv3x3s1_winograd43_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 4; const int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; const float* biasptr = bias; // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float bias0 = biasptr ? 
biasptr[p] : 0.f; float tmp[4][6]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j); const float* output0_tm_1 = output0_tm_0 + tiles; const float* output0_tm_2 = output0_tm_0 + tiles * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 5; float* output0 = out0.row(i * 4) + (j * 4); for (int m = 0; m < 6; m++) { float out0tm0 = output0_tm_0[0]; float out0tm1 = output0_tm_1[0]; float out0tm2 = output0_tm_2[0]; float out0tm3 = output0_tm_3[0]; float out0tm4 = output0_tm_4[0]; float out0tm5 = output0_tm_5[0]; float tmp02a = out0tm1 + out0tm2; float tmp13a = out0tm1 - out0tm2; float tmp02b = out0tm3 + out0tm4; float tmp13b = out0tm3 - out0tm4; float tmp0m = out0tm0 + tmp02a + tmp02b; float tmp1m = tmp13a + tmp13b * 2; float tmp2m = tmp02a + tmp02b * 4; float tmp3m = out0tm5 + tmp13a + tmp13b * 8; tmp[0][m] = tmp0m; tmp[1][m] = tmp1m; tmp[2][m] = tmp2m; tmp[3][m] = tmp3m; output0_tm_0 += tiles * 6; output0_tm_1 += tiles * 6; output0_tm_2 += tiles * 6; output0_tm_3 += tiles * 6; output0_tm_4 += tiles * 6; output0_tm_5 += tiles * 6; } for (int m = 0; m < 4; m++) { float tmp00 = tmp[m][0]; float tmp01 = tmp[m][1]; float tmp02 = tmp[m][2]; float tmp03 = tmp[m][3]; float tmp04 = tmp[m][4]; float tmp05 = tmp[m][5]; float tmp02a = tmp01 + tmp02; float tmp13a = tmp01 - tmp02; float tmp02b = tmp03 + tmp04; float tmp13b = tmp03 - tmp04; float out00 = bias0 + tmp00 + tmp02a + tmp02b; float out01 = bias0 + tmp13a + tmp13b * 2; float out02 = bias0 + tmp02a + tmp02b * 4; float out03 = bias0 + tmp05 + tmp13a + tmp13b * 8; output0[0] = out00; output0[1] = out01; output0[2] = out02; output0[3] = out03; output0 += outw; } } } } } static void conv3x3s1_winograd23_transform_input_sse(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; 
const int h = bottom_blob.h;
    // (continuation of conv3x3s1_winograd23_transform_input_sse)
    const int inch = bottom_blob.c;

    // 4x4 input tiles advance by 2 pixels, so (dim - 2) / 2 tiles per axis.
    const int w_tiles = (w - 2) / 2;
    const int h_tiles = (h - 2) / 2;
    const int tiles = w_tiles * h_tiles;

    // const float itm[4][4] = {
    //     {1.0f,  0.0f, -1.0f, 0.0f},
    //     {0.0f,  1.0f, 1.00f, 0.0f},
    //     {0.0f, -1.0f, 1.00f, 0.0f},
    //     {0.0f, -1.0f, 0.00f, 1.0f}
    // };

    // 0 = r00 - r02
    // 1 = r01 + r02
    // 2 = r02 - r01
    // 3 = r03 - r01

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        float tmp[4][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 2) + (j * 2);

                // Row pass: transform each of the tile's 4 rows into tmp.
                for (int m = 0; m < 4; m++)
                {
                    float r00 = r0[0];
                    float r01 = r0[1];
                    float r02 = r0[2];
                    float r03 = r0[3];

                    float tmp0m = r00 - r02;
                    float tmp1m = r01 + r02;
                    float tmp2m = r02 - r01;
                    float tmp3m = r03 - r01;

                    tmp[0][m] = tmp0m;
                    tmp[1][m] = tmp1m;
                    tmp[2][m] = tmp2m;
                    tmp[3][m] = tmp3m;

                    r0 += w;
                }

                // The transform buffer holds 16 planes of `tiles` scalars;
                // pointers 0..3 address one 4-row group for this tile slot.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j);
                float* r0_tm_1 = r0_tm_0 + tiles;
                float* r0_tm_2 = r0_tm_0 + tiles * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 3;

                // Column pass: same transform across tmp rows, scattered
                // into the four row planes.
                for (int m = 0; m < 4; m++)
                {
                    float tmp00 = tmp[m][0];
                    float tmp01 = tmp[m][1];
                    float tmp02 = tmp[m][2];
                    float tmp03 = tmp[m][3];

                    float r0tm0 = tmp00 - tmp02;
                    float r0tm1 = tmp01 + tmp02;
                    float r0tm2 = tmp02 - tmp01;
                    float r0tm3 = tmp03 - tmp01;

                    r0_tm_0[0] = r0tm0;
                    r0_tm_1[0] = r0tm1;
                    r0_tm_2[0] = r0tm2;
                    r0_tm_3[0] = r0tm3;

                    r0_tm_0 += tiles * 4;
                    r0_tm_1 += tiles * 4;
                    r0_tm_2 += tiles * 4;
                    r0_tm_3 += tiles * 4;
                }
            }
        }
    }
}

// Winograd F(2,3) output transform (scalar x86 path).
// Converts each 4x4 tile of top_blob_tm back into a 2x2 spatial output tile
// and adds the per-channel bias. Two passes per tile: a column pass into
// tmp[2][4], then a row pass producing the final 2x2 block.
static void conv3x3s1_winograd23_transform_output_sse(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // Each tile yields a 2x2 output block; outw/outh are assumed to be
    // padded to multiples of 2 by the caller (TODO confirm against caller).
    const int w_tiles = outw / 2;
    const int h_tiles = outh / 2;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[2][4] = {
    //     {1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r00 + r01 + r02
    // 1 = r01 - r02 + r03

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float bias0 = biasptr ? biasptr[p] : 0.f;

        float tmp[2][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // 16 planes of `tiles` scalars; pointers 0..3 address one
                // 4-row group for this tile slot.
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j);
                const float* output0_tm_1 = output0_tm_0 + tiles;
                const float* output0_tm_2 = output0_tm_0 + tiles * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 3;

                float* output0 = out0.row(i * 2) + (j * 2);

                // Column pass: reduce 4 values to 2 per column into tmp.
                for (int m = 0; m < 4; m++)
                {
                    float out0tm0 = output0_tm_0[0];
                    float out0tm1 = output0_tm_1[0];
                    float out0tm2 = output0_tm_2[0];
                    float out0tm3 = output0_tm_3[0];

                    float tmp0m = out0tm0 + out0tm1 + out0tm2;
                    float tmp1m = out0tm1 - out0tm2 + out0tm3;

                    tmp[0][m] = tmp0m;
                    tmp[1][m] = tmp1m;

                    output0_tm_0 += tiles * 4;
                    output0_tm_1 += tiles * 4;
                    output0_tm_2 += tiles * 4;
                    output0_tm_3 += tiles * 4;
                }

                // Row pass: same reduction across tmp rows, plus bias,
                // producing the final 2x2 output block.
                for (int m = 0; m < 2; m++)
                {
                    float tmp00 = tmp[m][0];
                    float tmp01 = tmp[m][1];
                    float tmp02 = tmp[m][2];
                    float tmp03 = tmp[m][3];

                    float out00 = bias0 + tmp00 + tmp01 + tmp02;
                    float out01 = bias0 + tmp01 - tmp02 + tmp03;

                    output0[0] = out00;
                    output0[1] = out01;

                    output0 += outw;
                }
            }
        }
    }
}
oneWayFunction.c
// Copyright (c) 2016-2018 Ulord Foundation Ltd.

#include "oneWayFunction.h"

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#ifndef MAC_OSX
#include <omp.h>
#endif

#include "my_time.h"
#include "common.h"

// OpenSSL Library
#include "c_sha1.h"
#include "c_sha256.h"
#include "c_sha512.h"
#include "c_sha3_256.h"
#include "c_whirlpool.h"
#include "c_ripemd160.h"
#include "c_blake2s256.h"
#include "c_aes128.h"
#include "c_des.h"
#include "c_crc32.h"
#include "c_hmac_md5.h"
#include "c_rc4.h"
#include "c_camellia128.h"

// JTR source code
#include "c_gost.h"
#include "c_haval5_256.h"
#include "c_skein512_256.h"

// Dispatch table of the one-way functions under test: each entry pairs a
// display name with its crypto_* implementation. Relies on C brace elision
// (each consecutive name/function pair fills one OneWayFunctionInfor).
// FUNCTION_NUM is declared in oneWayFunction.h and is presumably 16 to match
// the 16 entries below — verify against the header.
OneWayFunctionInfor funcInfor[FUNCTION_NUM] = {
    "SHA3-256", crypto_sha3_256,
    "SHA1", crypto_sha1,
    "SHA256", crypto_sha256,
    "SHA512", crypto_sha512,
    "Whirlpool", crypto_whirlpool,
    "RIPEMD-160", crypto_ripemd160,
    "BLAKE2s(256bits)", crypto_blake2s256,
    "AES(128bits)", crypto_aes128,
    "DES", crypto_des,
    "RC4", crypto_rc4,
    "Camellia(128bits)", crypto_camellia128,
    "CRC32", crypto_crc32,
    "HMAC(MD5)", crypto_hmac_md5,
    "GOST R 34.11-94", crypto_gost,
    "HAVAL-256/5", crypto_haval5_256,
    "Skein-512(256bits)", crypto_skein512_256
};

// One-time setup for algorithms that need precomputed tables
// (GOST substitution table, CRC32 lookup table). Call before using funcInfor.
void initOneWayFunction()
{
    gost_init_table();
    CRC32_Table_Init();
}

// Correctness/performance harness for the funcInfor table.
// NOTE(review): the entire body below is commented out, so the current
// behavior is only the trailing banner printf; the mess/messLen/iterNum
// parameters are unused. Kept verbatim for when the harness is re-enabled.
void testOneWayFunction(const char *mess, uint32_t messLen, const int64_t iterNum)
{
    /*
    int64_t j;
    uint32_t messLen = (uint32_t)strlen(mess);
    uint8_t input[INPUT_LEN], output[FUNCTION_NUM][OUTPUT_LEN];
    memset(input, 0, INPUT_LEN*sizeof(uint8_t));
    memcpy(input, mess, messLen*sizeof(char));

    printf("**************************** Correctness test (One way function) ****************************\n");
    printf("Test message: %s\n", mess);
    for (int i = 0; i < FUNCTION_NUM; ++i) {
        printf("%02d ", i);
        funcInfor[i].func(input, messLen, output[i]);
        view_data_u8(funcInfor[i].funcName, output[i], OUTPUT_LEN);
    }
    printf("*********************************************************************************************\n");

    printf("************************************************* Performance test (One way function) *************************************************\n");
    uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
    assert(NULL != result);
    memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
    uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
    uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
    printf(" %-18s", "Algorithm");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
        printf("%12d", threadNumArr[ix]);
    printf("\n");
    for (int i = 0; i < FUNCTION_NUM; ++i) {
        printf("%02d %-18s\t", i, funcInfor[i].funcName);
        for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
            omp_set_num_threads(threadNumArr[ix]);
            double startTime = get_wall_time();
            if (threadNumArr[ix] == 1) {
                for (j = 0; j < iterNum; ++j) {
                    funcInfor[i].func(input, messLen, result + j * OUTPUT_LEN);
                }
            } else {
                #pragma omp parallel for firstprivate(input), private(j) shared(result)
                for (j = 0; j < iterNum; ++j) {
                    funcInfor[i].func(input, messLen, result + j * OUTPUT_LEN);
                }
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            printf("%5.0f Kps ", iterNum / 1000 / costTime);
            fflush(stdout);

            // Check result
            for (j = 0; j < iterNum; j += 1) {
                if (memcmp(output[i], result + j * OUTPUT_LEN, OUTPUT_LEN)) {
                    printf("Thread num: %u, j: %ld\n", threadNumArr[ix], j);
                    view_data_u8("output", output[i], OUTPUT_LEN);
                    view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
                    abort();
                }
            }
        }
        printf("\n");
    }

    if (NULL != result) {
        free(result);
        result = NULL;
    }
    */
    printf("***************************************************************************************************************************************\n");
}
target-data-6c.c
// ---------------------------------------------------------------------------------------- // Implementation of Example target.3c (Section 52.3, page 196) from Openmp // 4.0.2 Examples // on the document http://openmp.org/mp-documents/openmp-examples-4.0.2.pdf // // // // // ---------------------------------------------------------------------------------------- #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define ERROR_THRESHOLD 0.05 /* Problem size */ #define N 8192 #define THRESHOLD 4096 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init(DATA_TYPE *A, DATA_TYPE *B) { int i; for (i = 0; i < N; i++) { A[i] = i / 2.0; B[i] = ((N - 1) - i) / 3.0; } return; } void init_again(DATA_TYPE *A, DATA_TYPE *B) { int i; for (i = 0; i < N; i++) { A[i] = i; B[i] = ((N - 1) - i); } return; } void vec_mult(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i; for (i = 0; i < N; i++) C[i] = A[i] * B[i]; init_again(A, B); for (i = 0; i < N; i++) C[i] += A[i] * B[i]; } void vec_mult_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i; #pragma omp target data if (N > THRESHOLD) map(from : C[ : N]) { #pragma omp target if (N > THRESHOLD) map(to : A[ : N], B[ : N]) #pragma omp parallel for for (i = 0; i < N; i++) C[i] = A[i] * B[i]; init_again(A, B); #pragma omp target if (N > THRESHOLD) map(to : A[ : N], B[ : N]) #pragma omp parallel for for (i = 0; i < N; i++) C[i] += A[i] * B[i]; } } int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) { int i, fail; fail = 0; // Compare B and B_GPU for (i = 0; i < N; i++) { if (B[i] != B_GPU[i]) printf("DIFF @ %d![%f, %f]\n", i, B[i], B_GPU[i]); if (percentDiff(B[i], B_GPU[i]) > ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " 
"Percent: %d\n", ERROR_THRESHOLD, fail); return fail; } int main(int argc, char *argv[]) { double t_start, t_end, t_start_OMP, t_end_OMP; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *C_OMP; A = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); C = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); C_OMP = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, ">> Two vector multiplication <<\n"); // initialize the arrays init(A, B); t_start_OMP = rtclock(); vec_mult_OMP(A, B, C_OMP); t_end_OMP = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP); //); #ifdef RUN_TEST // initialize the arrays init(A, B); t_start = rtclock(); vec_mult(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //); fail = compareResults(C, C_OMP); #endif free(A); free(B); free(C); free(C_OMP); return fail; }
convolution_sgemm_pack8to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD void im2col_sgemm_pack8to4_int8_neon_asimddp(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt); void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_neon_asimddp(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h); #endif static void im2col_sgemm_pack8to4_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt) { #if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD if (ncnn::cpu_support_arm_asimddp()) { im2col_sgemm_pack8to4_int8_neon_asimddp(bottom_im2col, top_blob, kernel, opt); return; } #endif // Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; // permute Mat tmp; #if __aarch64__ #if __ARM_FEATURE_DOTPROD if (size >= 16) tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); 
else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #else // __ARM_FEATURE_DOTPROD if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator); #endif // __aarch64__ { #if __aarch64__ #if __ARM_FEATURE_DOTPROD int nn_size = size >> 4; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 16; signed char* tmpptr = tmp.channel(i / 16); for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // split pack8 to pack4 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld2 {v0.4s, v1.4s}, [%0], #32 \n" "ld2 {v2.4s, v3.4s}, [%0], #32 \n" "ld2 {v4.4s, v5.4s}, [%0], #32 \n" "ld2 {v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #96 \n" "st1 {v0.16b}, [%1], #16 \n" "st1 {v2.16b}, [%1], #16 \n" "st1 {v4.16b}, [%1], #16 \n" "st1 {v6.16b}, [%1], #16 \n" "st1 {v1.16b}, [%1], #16 \n" "st1 {v3.16b}, [%1], #16 \n" "st1 {v5.16b}, [%1], #16 \n" "st1 {v7.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); img0 += size * 8; } } } remain_size_start += nn_size << 4; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int 
ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld2 {v0.4s, v1.4s}, [%0], #32 \n" "ld2 {v2.4s, v3.4s}, [%0] \n" "sub %0, %0, #32 \n" "st1 {v0.16b}, [%1], #16 \n" "st1 {v2.16b}, [%1], #16 \n" "st1 {v1.16b}, [%1], #16 \n" "st1 {v3.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #else // __ARM_FEATURE_DOTPROD int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 2; #endif // __ARM_FEATURE_DOTPROD #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else signed char* tmpptr = tmp.channel(i / 4); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __ARM_FEATURE_DOTPROD asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld2 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.16b, v1.16b}, [%0] \n" "st1 {v0.16b, v1.16b}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #endif // __ARM_FEATURE_DOTPROD img0 += size * 8; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #else int remain_size_start = 0; int nn_size = (size - remain_size_start) >> 1; #endif #pragma omp parallel for num_threads(opt.num_threads) 
for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else signed char* tmpptr = tmp.channel(i / 2); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld2 {v0.2s, v1.2s}, [%0] \n" "st1 {v0.2s, v1.2s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.16b}, [%0] \n" "st1 {v0.16b}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #endif // __ARM_FEATURE_DOTPROD #else asm volatile( "pld [%0, #128] \n" "vld1.s8 {d0-d1}, [%0 :64] \n" "vst1.s8 {d0-d1}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif img0 += size * 8; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0 :64] \n" "vst1.s8 {d0}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0"); #endif img0 += size * 8; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr0 = top_blob.channel(p); int i = 0; #if __aarch64__ #if __ARM_FEATURE_DOTPROD for (; i + 15 < size; i += 16) { const signed char* tmpptr = tmp.channel(i / 16); const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v24.16b}, [%3], #16 \n" // _w0123_l "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "ld1 {v16.16b}, [%2], #16 \n" // _val0123_l "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "ld1 {v17.16b}, [%2], #16 \n" // _val4567_l "sdot v0.4s, v24.16b, v16.4b[0] \n" "sdot v1.4s, v24.16b, v16.4b[1] \n" "sdot v2.4s, v24.16b, v16.4b[2] \n" "sdot v3.4s, v24.16b, v16.4b[3] \n" "ld1 {v18.16b}, [%2], #16 \n" // _val891011_l "sdot v4.4s, v24.16b, v17.4b[0] \n" "sdot v5.4s, v24.16b, v17.4b[1] \n" "sdot v6.4s, v24.16b, v17.4b[2] \n" "sdot v7.4s, v24.16b, v17.4b[3] \n" "ld1 {v19.16b}, [%2], #16 \n" // _val12131415_l "sdot v8.4s, v24.16b, v18.4b[0] \n" "sdot v9.4s, v24.16b, v18.4b[1] \n" "ld1 {v25.16b}, [%3], #16 \n" // _w0123_h "sdot v10.4s, v24.16b, v18.4b[2] \n" "sdot v11.4s, v24.16b, v18.4b[3] \n" "ld1 {v20.16b}, [%2], #16 \n" // _val0123_h "sdot v12.4s, v24.16b, v19.4b[0] \n" "sdot v13.4s, v24.16b, v19.4b[1] \n" "sdot v14.4s, v24.16b, v19.4b[2] \n" "sdot v15.4s, v24.16b, v19.4b[3] \n" "ld1 {v21.16b}, [%2], #16 \n" // _val4567_h "sdot v0.4s, v25.16b, v20.4b[0] \n" "sdot 
v1.4s, v25.16b, v20.4b[1] \n" "sdot v2.4s, v25.16b, v20.4b[2] \n" "sdot v3.4s, v25.16b, v20.4b[3] \n" "ld1 {v22.16b}, [%2], #16 \n" // _val891011_h "sdot v4.4s, v25.16b, v21.4b[0] \n" "sdot v5.4s, v25.16b, v21.4b[1] \n" "sdot v6.4s, v25.16b, v21.4b[2] \n" "sdot v7.4s, v25.16b, v21.4b[3] \n" "ld1 {v23.16b}, [%2], #16 \n" // _val12131415_h "sdot v8.4s, v25.16b, v22.4b[0] \n" "sdot v9.4s, v25.16b, v22.4b[1] \n" "ld1 {v24.16b}, [%3], #16 \n" // _w0123_l "sdot v10.4s, v25.16b, v22.4b[2] \n" "sdot v11.4s, v25.16b, v22.4b[3] \n" "ld1 {v16.16b}, [%2], #16 \n" // _val0123_l "sdot v12.4s, v25.16b, v23.4b[0] \n" "sdot v13.4s, v25.16b, v23.4b[1] \n" "subs %w1, %w1, #1 \n" "sdot v14.4s, v25.16b, v23.4b[2] \n" "sdot v15.4s, v25.16b, v23.4b[3] \n" "bne 0b \n" "sub %2, %2, #16 \n" "sub %3, %3, #16 \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(tmpptr), "3"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _val4567_l = vld1q_s8(tmpptr + 16); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, 
_val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 32); int8x16_t _val4567_h = vld1q_s8(tmpptr + 48); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); _sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0); _sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1); _sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2); _sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3); tmpptr += 64; kptr0 += 32; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); vst1q_s32(outptr0 + 16, _sum4); vst1q_s32(outptr0 + 20, _sum5); vst1q_s32(outptr0 + 24, _sum6); vst1q_s32(outptr0 + 28, _sum7); outptr0 += 32; } #endif for (; i + 3 < size; i += 4) { #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4); #else const signed char* tmpptr = tmp.channel(i / 4); #endif const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val0123_l = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1); _sum2 = 
vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3); int8x16_t _val0123_h = vld1q_s8(tmpptr + 16); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1); _sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2); _sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3); tmpptr += 32; kptr0 += 32; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); vst1q_s32(outptr0 + 8, _sum2); vst1q_s32(outptr0 + 12, _sum3); outptr0 += 16; #else // __ARM_FEATURE_DOTPROD asm volatile( "eor v0.16b, v0.16b, v0.16b \n" "eor v1.16b, v1.16b, v1.16b \n" "eor v2.16b, v2.16b, v2.16b \n" "eor v3.16b, v3.16b, v3.16b \n" "eor v4.16b, v4.16b, v4.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "prfm pldl1keep, [%2, #128] \n" "prfm pldl1keep, [%3, #256] \n" "lsr w4, %w1, #1 \n" // w4 = nn >> 1 "cmp w4, #0 \n" "beq 1f \n" "prfm pldl1keep, [%3, #512] \n" "add x5, %2, #16 \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v16.16b}, [%2] \n" // val L H "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%3], #64 \n" "add %2, %2, #32 \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "ld1 {v18.16b}, [%2] \n" "add %2, %2, #32 \n" "0: \n" "smull v24.8h, v16.8b, v20.8b \n" "prfm pldl1keep, [%3, #256] \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [%3, #512] \n" "smull v26.8h, v16.8b, v21.8b \n" "subs w4, w4, #1 \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "smlal v26.8h, v18.8b, v23.8b \n" "smlal2 v27.8h, 
v19.16b, v23.16b \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [x5] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add x5, x5, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v2.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [x5] \n" "smlal v30.8h, v19.8b, v23.8b \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "smull v24.8h, v16.8b, v20.8b \n" "add x5, x5, #32 \n" "smull2 v25.8h, v17.16b, v20.16b \n" "prfm pldl1keep, [x5, #128] \n" "smull v26.8h, v16.8b, v21.8b \n" "prfm pldl1keep, [x5, #384] \n" "smull2 v27.8h, v17.16b, v21.16b \n" "ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L "smlal v24.8h, v18.8b, v22.8b \n" "sadalp v5.4s, v29.8h \n" "smlal2 v25.8h, v19.16b, v22.16b \n" "sadalp v4.4s, v28.8h \n" "smlal v26.8h, v18.8b, v23.8b \n" "sadalp v7.4s, v31.8h \n" "smlal2 v27.8h, v19.16b, v23.16b \n" "sadalp v6.4s, v30.8h \n" "smull2 v29.8h, v16.16b, v20.16b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull2 v31.8h, v16.16b, v21.16b \n" "ld1 {v16.16b}, [%2] \n" // val L H "smull v30.8h, v17.8b, v21.8b \n" "add %2, %2, #32 \n" "smlal2 v29.8h, v18.16b, v22.16b \n" "sadalp v10.4s, v26.8h \n" "smlal v28.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smlal2 v31.8h, v18.16b, v23.16b \n" "ld1 {v18.16b}, [%2] \n" "smlal v30.8h, v19.8b, v23.8b \n" "add %2, %2, #32 \n" "ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%3], #64 \n" "sadalp v13.4s, v29.8h \n" "prfm pldl1keep, [%2, #128] \n" "sadalp v12.4s, v28.8h \n" "prfm pldl1keep, [%2, #384] \n" "sadalp v15.4s, v31.8h \n" "ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L "sadalp v14.4s, v30.8h \n" "bne 0b \n" "sub %2, %2, #64 \n" "sub %3, %3, #64 \n" "1: \n" "and w4, %w1, #1 \n" // w4 = remain = nn & 1 "cmp w4, #0 \n" // w4 > 0 "beq 2f \n" "ld1 {v16.8b, 
v17.8b}, [%2], #16 \n" "ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%3], #32 \n" "smull v24.8h, v16.8b, v20.8b \n" "smull v25.8h, v16.8b, v21.8b \n" "smull v26.8h, v16.8b, v22.8b \n" "ld1 {v18.8b, v19.8b}, [%2], #16 \n" "smull v27.8h, v16.8b, v23.8b \n" "sadalp v0.4s, v24.8h \n" "smull v28.8h, v17.8b, v20.8b \n" "sadalp v1.4s, v25.8h \n" "smull v29.8h, v17.8b, v21.8b \n" "sadalp v2.4s, v26.8h \n" "smull v30.8h, v17.8b, v22.8b \n" "sadalp v3.4s, v27.8h \n" "smull v31.8h, v17.8b, v23.8b \n" "sadalp v4.4s, v28.8h \n" "smull v24.8h, v18.8b, v20.8b \n" "sadalp v5.4s, v29.8h \n" "smull v25.8h, v18.8b, v21.8b \n" "sadalp v6.4s, v30.8h \n" "smull v26.8h, v18.8b, v22.8b \n" "sadalp v7.4s, v31.8h \n" "smull v27.8h, v18.8b, v23.8b \n" "sadalp v8.4s, v24.8h \n" "smull v28.8h, v19.8b, v20.8b \n" "sadalp v9.4s, v25.8h \n" "smull v29.8h, v19.8b, v21.8b \n" "sadalp v10.4s, v26.8h \n" "smull v30.8h, v19.8b, v22.8b \n" "sadalp v11.4s, v27.8h \n" "smull v31.8h, v19.8b, v23.8b \n" "sadalp v12.4s, v28.8h \n" "sadalp v13.4s, v29.8h \n" "sadalp v14.4s, v30.8h \n" "sadalp v15.4s, v31.8h \n" "2: \n" "addp v0.4s, v0.4s, v1.4s \n" "addp v2.4s, v2.4s, v3.4s \n" "addp v4.4s, v4.4s, v5.4s \n" "addp v6.4s, v6.4s, v7.4s \n" "addp v8.4s, v8.4s, v9.4s \n" "addp v10.4s, v10.4s, v11.4s \n" "addp v12.4s, v12.4s, v13.4s \n" "addp v14.4s, v14.4s, v15.4s \n" "addp v0.4s, v0.4s, v2.4s \n" "addp v1.4s, v4.4s, v6.4s \n" "addp v2.4s, v8.4s, v10.4s \n" "addp v3.4s, v12.4s, v14.4s \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" : "=r"(outptr0), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(tmpptr), "3"(kptr0) : "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #endif // __ARM_FEATURE_DOTPROD } #endif // __aarch64__ for (; i + 1 < size; i += 2) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* 
tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 #if __aarch64__ #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x16_t _val01_l_h = vld1q_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val01_l_h, 0); _sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val01_l_h, 1); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val01_l_h, 2); _sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val01_l_h, 3); tmpptr += 16; kptr0 += 32; } vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); outptr0 += 8; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum00 = vdupq_n_s32(0); int32x4_t _sum01 = vdupq_n_s32(0); int32x4_t _sum02 = vdupq_n_s32(0); int32x4_t _sum03 = vdupq_n_s32(0); int32x4_t _sum10 = vdupq_n_s32(0); int32x4_t _sum11 = vdupq_n_s32(0); int32x4_t _sum12 = vdupq_n_s32(0); int32x4_t _sum13 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val0 = vld1q_s8(tmpptr); int8x16_t _val1 = vld1q_s8(tmpptr + 16); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = 
vld1q_s8(kptr0 + 48); _wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45)); _wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45)); _wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67)); _wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67)); _wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45)); _wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45)); _wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67)); _wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 32; kptr0 += 64; } for (; j < nn; j++) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01)); int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01)); int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23)); int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23)); _sum00 = vpadalq_s16(_sum00, _wv00); _sum01 = vpadalq_s16(_sum01, _wv01); _sum02 = vpadalq_s16(_sum02, _wv02); _sum03 = vpadalq_s16(_sum03, _wv03); _sum10 = vpadalq_s16(_sum10, _wv10); _sum11 = vpadalq_s16(_sum11, _wv11); _sum12 = vpadalq_s16(_sum12, _wv12); _sum13 = vpadalq_s16(_sum13, _wv13); tmpptr += 16; kptr0 += 32; } int32x4_t _s001 = vpaddq_s32(_sum00, _sum01); int32x4_t _s023 = vpaddq_s32(_sum02, _sum03); int32x4_t _s101 = vpaddq_s32(_sum10, _sum11); 
int32x4_t _s123 = vpaddq_s32(_sum12, _sum13); int32x4_t _s00123 = vpaddq_s32(_s001, _s023); int32x4_t _s10123 = vpaddq_s32(_s101, _s123); vst1q_s32(outptr0, _s00123); vst1q_s32(outptr0 + 4, _s10123); outptr0 += 8; #endif // __ARM_FEATURE_DOTPROD #else // __aarch64__ asm volatile( "veor q0, q0 \n" "veor q1, q1 \n" "veor q2, q2 \n" "veor q3, q3 \n" "veor q4, q4 \n" "veor q5, q5 \n" "veor q6, q6 \n" "veor q7, q7 \n" "pld [%2, #256] \n" "lsr r4, %1, #1 \n" // r4 = nn = size >> 1 "cmp r4, #0 \n" "beq 1f \n" "add r5, %3, #16 \n" "pld [%3, #128] \n" "mov r6, #32 \n" "pld [%3, #384] \n" "vld1.s8 {d20-d21}, [%3 :128], r6 \n" // _w01 "vld1.s8 {d16-d19}, [%2 :128]! \n" // _val0 _val1 "vld1.s8 {d22-d23}, [%3 :128], r6 \n" // _w45 "0: \n" "vmull.s8 q12, d16, d20 \n" "pld [%2, #256] \n" "vmull.s8 q13, d16, d21 \n" "pld [%3, #384] \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23 "vmlal.s8 q12, d18, d22 \n" "vmlal.s8 q13, d18, d23 \n" "subs r4, r4, #1 \n" "vmlal.s8 q14, d19, d22 \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67 "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d20 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d21 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d20 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d21 \n" "vld1.s8 {d16-d17}, [%2 :128]! \n" // _val0 "vmlal.s8 q12, d18, d22 \n" "vld1.s8 {d20-d21}, [%3 :128], r6 \n" // _w01 "vmlal.s8 q13, d18, d23 \n" "pld [r5, #128] \n" "vmlal.s8 q14, d19, d22 \n" "pld [r5, #384] \n" "vmlal.s8 q15, d19, d23 \n" "vld1.s8 {d18-d19}, [%2 :128]! \n" // _val1 "vpadal.s16 q2, q12 \n" "vld1.s8 {d22-d23}, [%3 :128], r6 \n" // _w45 "vpadal.s16 q3, q13 \n" "pld [%2, #128] \n" "vpadal.s16 q6, q14 \n" "pld [%3, #128] \n" "vpadal.s16 q7, q15 \n" "bne 0b \n" "sub %2, %2, #32 \n" "sub %3, %3, #64 \n" "1: \n" "and r4, %1, #1 \n" // r4 = remain = size & 1 "cmp r4, #0 \n" // r4 > 0 "beq 2f \n" "vld1.s8 {d16-d17}, [%2 :128]! 
\n" // _val "vld1.s8 {d20-d21}, [%3 :128]! \n" // _w01 "vmull.s8 q12, d16, d20 \n" "vld1.s8 {d22-d23}, [%3 :128]! \n" // _w23 "vmull.s8 q13, d16, d21 \n" "vmull.s8 q14, d17, d20 \n" "vmull.s8 q15, d17, d21 \n" "vpadal.s16 q0, q12 \n" "vmull.s8 q12, d16, d22 \n" "vpadal.s16 q1, q13 \n" "vmull.s8 q13, d16, d23 \n" "vpadal.s16 q4, q14 \n" "vmull.s8 q14, d17, d22 \n" "vpadal.s16 q5, q15 \n" "vmull.s8 q15, d17, d23 \n" "vpadal.s16 q2, q12 \n" "vpadal.s16 q3, q13 \n" "vpadal.s16 q6, q14 \n" "vpadal.s16 q7, q15 \n" "2: \n" "vpadd.s32 d16, d0, d1 \n" "vpadd.s32 d17, d2, d3 \n" "vpadd.s32 d18, d4, d5 \n" "vpadd.s32 d19, d6, d7 \n" "vpadd.s32 d20, d8, d9 \n" "vpadd.s32 d21, d10, d11 \n" "vpadd.s32 d22, d12, d13 \n" "vpadd.s32 d23, d14, d15 \n" "vpadd.s32 d0, d16, d17 \n" "vpadd.s32 d1, d18, d19 \n" "vpadd.s32 d2, d20, d21 \n" "vpadd.s32 d3, d22, d23 \n" "vst1.s32 {d0-d3}, [%0 :128]! \n" : "=r"(outptr0), "=r"(nn), "=r"(tmpptr), "=r"(kptr0) : "0"(outptr0), "1"(nn), "2"(tmpptr), "3"(kptr0) : "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i < size; i++) { #if __aarch64__ #if __ARM_FEATURE_DOTPROD const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #else const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2); #endif #else const signed char* tmpptr = tmp.channel(i / 2 + i % 2); #endif const signed char* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 #if __ARM_FEATURE_DOTPROD int32x4_t _sum0 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int8x8_t _val0_l_h = vld1_s8(tmpptr); int8x16_t _w0123_l = vld1q_s8(kptr0); _sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0); int8x16_t _w0123_h = vld1q_s8(kptr0 + 16); _sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1); tmpptr += 8; kptr0 += 32; } vst1q_s32(outptr0, _sum0); outptr0 += 4; #else // __ARM_FEATURE_DOTPROD int32x4_t _sum0 = 
vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int j = 0; for (; j + 1 < nn; j += 2) { int8x16_t _val = vld1q_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23)); int8x16_t _w45 = vld1q_s8(kptr0 + 32); int8x16_t _w67 = vld1q_s8(kptr0 + 48); _wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45)); _wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45)); _wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67)); _wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 16; kptr0 += 64; } for (; j < nn; j++) { int8x8_t _val = vld1_s8(tmpptr); int8x16_t _w01 = vld1q_s8(kptr0); int8x16_t _w23 = vld1q_s8(kptr0 + 16); int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01)); int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01)); int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23)); int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23)); _sum0 = vpadalq_s16(_sum0, _wv0); _sum1 = vpadalq_s16(_sum1, _wv1); _sum2 = vpadalq_s16(_sum2, _wv2); _sum3 = vpadalq_s16(_sum3, _wv3); tmpptr += 8; kptr0 += 32; } #if __aarch64__ int32x4_t _s01 = vpaddq_s32(_sum0, _sum1); int32x4_t _s23 = vpaddq_s32(_sum2, _sum3); int32x4_t _s0123 = vpaddq_s32(_s01, _s23); #else int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0)); int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1)); int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2)); int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3)); int32x4_t _s0123 = 
// (continuation of the preceding sgemm kernel) horizontal-add the four
// per-output-channel accumulators into a single int32x4 result.
vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high));
#endif

            vst1q_s32(outptr0, _s0123);
            outptr0 += 4;
#endif // __ARM_FEATURE_DOTPROD
        }
    }
}

// Repack int8 convolution weights from the [maxk, inch, outch] layout into the
// interleaved layout consumed by the pack8to4 sgemm kernel above.
// Assumes inch is a multiple of 8 and outch a multiple of 4 (the loops only
// process full 8x4 groups) — TODO confirm callers guarantee this.
static void convolution_im2col_sgemm_transform_kernel_pack8to4_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
#if NCNN_RUNTIME_CPU && NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
    // Runtime dispatch: when the CPU supports the Armv8.2 dot-product
    // extension, delegate to the asimddp-specialized repacking instead.
    if (ncnn::cpu_support_arm_asimddp())
    {
        convolution_im2col_sgemm_transform_kernel_pack8to4_int8_neon_asimddp(_kernel, kernel_tm, inch, outch, kernel_w, kernel_h);
        return;
    }
#endif

    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    // 32 bytes per (8 input channels x 4 output channels) group, one byte each.
    kernel_tm.create(32 * maxk, inch / 8, outch / 4, (size_t)1u);

    for (int q = 0; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
#if __ARM_FEATURE_DOTPROD
                // dotprod layout: two 4x4 tiles — input lanes 0-3 first,
                // then input lanes 4-7, for each of the 4 output channels.
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 4; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#else
                // generic NEON layout: 8 consecutive input channels per
                // output channel.
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#endif
            }
        }
    }
}

// Driver: lower the convolution to a GEMM by expanding the packed-int8 input
// with im2col, then invoke the pack8to4 sgemm kernel on the result.
// Input is elempack-8 int8 (8 bytes per pixel); output written into top_blob.
static void convolution_im2col_sgemm_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // Bytes to skip from the end of one output row to the start of the
        // next input row (accounts for stride and the 8-byte packing).
        const int gap = (w * stride_h - outw * stride_w) * 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v * 8;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // Copy 4 output pixels (4 x 8 bytes) per iteration.
                        for (; j + 3 < outw; j += 4)
                        {
                            int8x8_t _val0 = vld1_s8(sptr);
                            int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
                            int8x8_t _val2 = vld1_s8(sptr + stride_w * 16);
                            int8x8_t _val3 = vld1_s8(sptr + stride_w * 24);
                            vst1_s8(ptr, _val0);
                            vst1_s8(ptr + 8, _val1);
                            vst1_s8(ptr + 16, _val2);
                            vst1_s8(ptr + 24, _val3);

                            sptr += stride_w * 32;
                            ptr += 32;
                        }
                        // Copy 2 output pixels per iteration.
                        for (; j + 1 < outw; j += 2)
                        {
                            int8x8_t _val0 = vld1_s8(sptr);
                            int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
                            vst1_s8(ptr, _val0);
                            vst1_s8(ptr + 8, _val1);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        // Remaining single pixels.
                        for (; j < outw; j++)
                        {
                            int8x8_t _val = vld1_s8(sptr);
                            vst1_s8(ptr, _val);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to4_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
daxpy_openmp.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #define VECTOR_SIZE 100 int main( int argc, char* argv[] ) { unsigned long int i,id; double alpha=1.0; // Number of bytes to allocate for N doubles size_t bytes = VECTOR_SIZE*sizeof(double); // Allocate memory for arrays X, A, B, and C on host double *X = (double*)malloc(bytes); double *A = (double*)malloc(bytes); double *B = (double*)malloc(bytes); double *C = (double*)malloc(bytes); #pragma omp parallel for shared(A,B) private(i,id) for(i=0;i<VECTOR_SIZE;i++){ id = omp_get_thread_num(); if (VECTOR_SIZE<=100){ printf("Initializing vectors id=%d working on i=%d\n", id,i); } else if(i%1000000==0){ printf("*"); } X[i]=M_PI*(double)(i+1)/VECTOR_SIZE; A[i]=cos(X[i])*cos(X[i]); B[i]=sin(X[i])*sin(X[i]); } #pragma omp parallel for shared(A,B,C) private(i,id) schedule(static,10) for(i=0;i<VECTOR_SIZE;i++){ id = omp_get_thread_num(); if (VECTOR_SIZE<=100){ printf("Computing C id=%d working on i=%d\n", id,i); } else if(i%1000000==0){ printf("#"); } C[i]=alpha*A[i]-B[i]; } printf("\n"); if (VECTOR_SIZE<=100){ for(i=0;i<VECTOR_SIZE;i++) printf("%9.5f %9.5f %9.5f %9.5f\n",X[i],A[i],B[i],C[i]); } else{ for(i=VECTOR_SIZE-10;i<VECTOR_SIZE;i++) printf("%9.5f %9.5f %9.5f %9.5f\n",X[i],A[i],B[i],C[i]); } // Release memory free(A); free(B); free(C); return 0; }
nodal_two_step_v_p_strategy_for_FSI.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_FOR_FSI_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_for_FSI.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include "nodal_two_step_v_p_strategy.h" #include "nodal_two_step_v_p_strategy_for_FSI.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class NodalTwoStepVPStrategyForFSI : public NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ 
KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategyForFSI); /// Counted pointer of NodalTwoStepVPStrategy //typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; /// Node type (default is: Node<3>) typedef Node<3> NodeType; /// Geometry type (using with given NodeType) typedef Geometry<NodeType> GeometryType; typedef std::size_t SizeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mVelocityTolerance; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mPressureTolerance; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mMaxPressureIter; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mDomainSize; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mTimeOrder; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mReformDofSet; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpMomentumStrategy; using NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::mpPressureStrategy; typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType; typedef 
GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::InitializeStrategy(rSolverConfig); } NodalTwoStepVPStrategyForFSI(ModelPart &rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart, pVelocityLinearSolver, pPressureLinearSolver, ReformDofSet, VelTol, PresTol, MaxPressureIterations, TimeOrder, DomainSize) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. 
// Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. 
virtual ~NodalTwoStepVPStrategyForFSI() {} double Solve() override { // Initialize BDF2 coefficients ModelPart &rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); double NormDp = 0.0; ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED]; unsigned int maxNonLinearIterations = mMaxPressureIter; std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl; if (timeIntervalChanged == true && currentTime > 10 * timeInterval) { maxNonLinearIterations *= 2; } if (currentTime < 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl; maxNonLinearIterations *= 3; } if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl; maxNonLinearIterations *= 2; } bool momentumConverged = true; bool continuityConverged = false; bool fixedTimeStep = false; // bool momentumAlreadyConverged=false; // bool continuityAlreadyConverged=false; /* boost::timer solve_step_time; */ // std::cout<<" InitializeSolutionStep().... 
"<<std::endl; // this->UnactiveSliverElements(); //this is done in set_active_flag_mesher_process which is activated from fluid_pre_refining_mesher.py InitializeSolutionStep(); // it fills SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids and inner solids for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "----- > iteration: " << it << std::endl; if (it == 0) { ComputeNodalVolumeAndAssignFlagToElementType(); // it assings NODAL_VOLUME to fluid and SOLID_NODAL_VOLUME to solid. Interface nodes have both this->InitializeNonLinearIterations(); // it fills SOLID_NODAL_SFD_NEIGHBOURS for solids and NODAL_SFD_NEIGHBOURS for fluids } // std::cout<<" CalcNodalStrainsAndStresses .... "<<std::endl; CalcNodalStrainsAndStresses(); // it computes stresses and strains for fluid and solid nodes // std::cout<<" CalcNodalStrainsAndStresses DONE "<<std::endl; momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep); UpdateTopology(rModelPart, BaseType::GetEchoLevel()); // std::cout<<" ComputeNodalVolume .... 
"<<std::endl; ComputeNodalVolume(); // std::cout<<" ComputeNodalVolume DONE "<<std::endl; this->InitializeNonLinearIterations(); // std::cout<<" InitializeNonLinearIterations DONE "<<std::endl; CalcNodalStrains(); // std::cout<<" CalcNodalStrains DONE "<<std::endl; if (fixedTimeStep == false) { continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations); } // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("momentumConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // momentumAlreadyConverged=true; // } // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("continuityConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // continuityAlreadyConverged=true; // } if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1)) { //this->ComputeErrorL2NormCaseImposedG(); //this->ComputeErrorL2NormCasePoiseuille(); this->CalculateAccelerations(); // std::ofstream myfile; // myfile.open ("maxConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); } bool hybridMethod=false; if(hybridMethod==true){ if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 0)) { this->UpdateElementalStressStrain(); } } if ((continuityConverged && momentumConverged) && it > 1) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." 
<< std::endl; break; } if (fixedTimeStep == true) { break; } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." << std::endl; if (mReformDofSet) this->Clear(); /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */ return NormDp; } void UpdateElementalStressStrain() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { /* itElem-> InitializeElementStrainStressState(); */ itElem->InitializeSolutionStep(rCurrentProcessInfo); } } } void Initialize() override { std::cout << " \n Initialize in nodal_two_step_v_p_strategy_FSI" << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size(); unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } else { std::cout << "THIS node does not have NODAL_VOLUME... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } else { std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } else { std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) { rNodalSFDneighbours.resize(sizeSDFNeigh, false); } noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } else { std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) { rSpatialDefRate.resize(sizeStrains, false); } noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) { rFgrad.resize(dimension, dimension, false); } noalias(rFgrad) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) { rFgradVel.resize(dimension, dimension, false); } noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS)) { Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); if (rSolidNodalStress.size() != sizeStrains) { rSolidNodalStress.resize(sizeStrains, false); } noalias(rSolidNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have SOLID_NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS); if (rSolidNodalStress.size() != sizeStrains) { rSolidNodalStress.resize(sizeStrains, false); } noalias(rSolidNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0; } else { std::cout << "THIS node does not have SOLID_NODAL_VOLUME... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0; } else { std::cout << "THIS node does not have SOLID_NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0; } else { std::cout << "THIS node does not have SOLID_NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS)) { Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh) { rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false); } noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } else { std::cout << "THIS node does not have SOLID_NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE)) { Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE); if (rSolidSpatialDefRate.size() != sizeStrains) { rSolidSpatialDefRate.resize(sizeStrains, false); } noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have SOLID_NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD)) { Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); if (rSolidFgrad.size1() != dimension) { rSolidFgrad.resize(dimension, dimension, false); } noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); if (rSolidFgradVel.size1() != dimension) { rSolidFgradVel.resize(dimension, dimension, false); } noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have SOLID_NODAL_DEFORMATION_GRAD_VEL... " << itNode->X() << " " << itNode->Y() << std::endl; } AssignMaterialToEachNode(itNode); } // } } void AssignMaterialToEachNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = 0; double volumetricCoeff = 0; if (itNode->Is(SOLID)) { double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO); //deviatoricCoeff=deltaT*secondLame deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5; //volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3) volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0; } else if (itNode->Is(FLUID) || itNode->Is(RIGID)) { deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); } if ((itNode->Is(SOLID) && itNode->Is(RIGID))) { itNode->FastGetSolutionStepValue(INTERFACE_NODE) = true; } else { itNode->FastGetSolutionStepValue(INTERFACE_NODE) = false; } double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0; //currFirstLame=deltaT*firstLame itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame; itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff; } void 
UnactiveSliverElements() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); MesherUtilities MesherUtils; double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart); double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size()); double ElementalVolume = 0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); if (numNodes == (dimension + 1)) { if (dimension == 2) { ElementalVolume = (itElem)->GetGeometry().Area(); } else if (dimension == 3) { ElementalVolume = (itElem)->GetGeometry().Volume(); } if (ElementalVolume < CriticalVolume) { // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl; (itElem)->Set(ACTIVE, false); } else { (itElem)->Set(ACTIVE, true); } } } } KRATOS_CATCH(""); } void ComputeNodalVolume() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be 
parallelized { Element::GeometryType &geometry = itElem->GetGeometry(); double elementalVolume = 0; if (dimension == 2) { elementalVolume = geometry.Area() / 3.0; } else if (dimension == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; if (itElem->Is(SOLID)) { double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); solidVolume += elementalVolume; nodalVolume += -elementalVolume; // if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before // nodalVolume += -elementalVolume; // } } } } // } } void ComputeNodalVolumeAndAssignFlagToElementType() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { Element::GeometryType &geometry = itElem->GetGeometry(); double elementalVolume = 0; if (dimension == 2) { elementalVolume = geometry.Area() / 3.0; } else if (dimension == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); unsigned int 
fluidNodes = 0; unsigned int solidNodes = 0; unsigned int interfaceNodes = 0; for (unsigned int i = 0; i < numNodes; i++) { if ((geometry(i)->Is(FLUID) && geometry(i)->IsNot(SOLID)) || (geometry(i)->Is(FLUID) && geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true)) { fluidNodes += 1; } if (geometry(i)->Is(SOLID)) { solidNodes += 1; } if (geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE) == true) { interfaceNodes += 1; } } if (solidNodes == numNodes) { itElem->Set(SOLID); // std::cout<<"THIS SOLID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } if (interfaceNodes == numNodes) { itElem->Set(SOLID); // std::cout<<"THIS INTERFACE ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } if (fluidNodes == numNodes) { itElem->Set(FLUID); // std::cout<<"THIS FLUID ELEMENT WAS "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } if (solidNodes == numNodes && fluidNodes == numNodes) { itElem->Reset(FLUID); // std::cout<<"THIS ELEMENT WAS BOTH FLUID AND SOLID "<<geometry(0)->Id()<<" "<<geometry(1)->Id()<<" "<<geometry(2)->Id()<<" "<<std::endl; } for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; if (itElem->Is(SOLID)) { double &solidVolume = geometry(i)->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); solidVolume += elementalVolume; nodalVolume += -elementalVolume; // if(geometry(i)->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // //I have the subtract the solid volume to the nodal volume of the interface fluid nodes because I added it before // nodalVolume += -elementalVolume; // } // if(interfaceNodes==numNodes && solidDensity==0){ // std::cout<<"This interface element has not a correct density....I am assigning it the fluid density----- TODO: IMPROVE IT, TAKE FROM NEIGHBOURS"<<std::endl; // double 
density=geometry(i)->FastGetSolutionStepValue(DENSITY); // geometry(i)->FastGetSolutionStepValue(SOLID_DENSITY)=density; // } } } } // } } void InitializeSolutionStep() override { FillNodalSFDVector(); } void FillNodalSFDVector() { // std::cout << "FillNodalSFDVector(); ... " << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { // ModelPart::NodeIterator NodesBegin; // ModelPart::NodeIterator NodesEnd; // OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); // for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) // { for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { this->InitializeNodalVariablesForRemeshedDomain(itNode); InitializeNodalVariablesForSolidRemeshedDomain(itNode); if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == false) { this->SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER if (itNode->Is(SOLID)) { SetNeighboursOrderToSolidNode(itNode); // it assigns neighbours to solid inner nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER } } else { SetNeighboursOrderToInterfaceNode(itNode); // it assigns neighbours to interface nodes, filling SOLID_NODAL_SFD_NEIGHBOURS_ORDER for solids and NODAL_SFD_NEIGHBOURS_ORDER for fluids } } // } // std::cout << "FillNodalSFDVector(); DONE " << std::endl; } void SetNeighboursOrderToSolidNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 becausealso the node itself must be considered as nieghbor node Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); if (rNodeOrderedNeighbours.size() != neighbourNodes) rNodeOrderedNeighbours.resize(neighbourNodes, false); noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes); rNodeOrderedNeighbours[0] = 
itNode->Id(); if (neighbourNodes > 1) { for (unsigned int k = 0; k < neighbourNodes - 1; k++) { rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id(); } } } void SetNeighboursOrderToInterfaceNode(ModelPart::NodeIterator itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; unsigned int fluidCounter = 1; unsigned int solidCounter = 1; if (neighbourNodes > 1) { for (unsigned int k = 0; k < neighbourNodes - 1; k++) { if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true) { fluidCounter += 1; } if (neighb_nodes[k].Is(SOLID)) { solidCounter += 1; } } } Vector &rFluidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); Vector &rSolidNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); if (rFluidNodeOrderedNeighbours.size() != fluidCounter) rFluidNodeOrderedNeighbours.resize(fluidCounter, false); if (rSolidNodeOrderedNeighbours.size() != solidCounter) rSolidNodeOrderedNeighbours.resize(solidCounter, false); noalias(rFluidNodeOrderedNeighbours) = ZeroVector(fluidCounter); noalias(rSolidNodeOrderedNeighbours) = ZeroVector(solidCounter); rFluidNodeOrderedNeighbours[0] = itNode->Id(); rSolidNodeOrderedNeighbours[0] = itNode->Id(); fluidCounter = 0; solidCounter = 0; if (neighbourNodes > 1) { for (unsigned int k = 0; k < neighbourNodes - 1; k++) { if (neighb_nodes[k].IsNot(SOLID) || neighb_nodes[k].FastGetSolutionStepValue(INTERFACE_NODE) == true) { fluidCounter += 1; rFluidNodeOrderedNeighbours[fluidCounter] = neighb_nodes[k].Id(); } if (neighb_nodes[k].Is(SOLID)) { solidCounter += 1; rSolidNodeOrderedNeighbours[solidCounter] = neighb_nodes[k].Id(); } } } // fluidCounter+=1; // solidCounter+=1; // ModelPart& rModelPart = BaseType::GetModelPart(); // const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); // const unsigned int 
sizeFluidSDFNeigh=fluidCounter*dimension; // const unsigned int sizeSolidSDFNeigh=solidCounter*dimension; // Vector& rFluidNodalSFDneighbours=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); // Vector& rSolidNodalSFDneighbours=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); // if(rFluidNodalSFDneighbours.size() != sizeFluidSDFNeigh) // rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,false); // if(rSolidNodalSFDneighbours.size() != sizeSolidSDFNeigh) // rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,false); // noalias(rFluidNodalSFDneighbours)=ZeroVector(sizeFluidSDFNeigh); // noalias(rSolidNodalSFDneighbours)=ZeroVector(sizeSolidSDFNeigh); // rFluidNodalSFDneighbours.resize(sizeFluidSDFNeigh,true); // rSolidNodalSFDneighbours.resize(sizeSolidSDFNeigh,true); // std::cout<<"rFluidNodeOrderedNeighbours "<<rFluidNodeOrderedNeighbours<<std::endl; // std::cout<<"rSolidNodeOrderedNeighbours "<<rSolidNodeOrderedNeighbours<<std::endl; // std::cout<<"rFluidNodalSFDneighbours "<<rFluidNodalSFDneighbours<<std::endl; // std::cout<<"rSolidNodalSFDneighbours "<<rSolidNodalSFDneighbours<<std::endl; } void InitializeNodalVariablesForSolidRemeshedDomain(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size() + 1; unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(SOLID_NODAL_CAUCHY_STRESS)) { Vector &rSolidNodalStress = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); if (rSolidNodalStress.size() != sizeStrains) rSolidNodalStress.resize(sizeStrains, false); noalias(rSolidNodalStress) = ZeroVector(sizeStrains); } if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rSolidNodalDevStress = 
itNode->FastGetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
        if (rSolidNodalDevStress.size() != sizeStrains)
            rSolidNodalDevStress.resize(sizeStrains, false);
        noalias(rSolidNodalDevStress) = ZeroVector(sizeStrains);
    }
    // Solid finite-difference shape-function data: dimension entries per
    // neighbour (the node itself included).
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS))
    {
        Vector &rSolidNodalSFDneighbours = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
        if (rSolidNodalSFDneighbours.size() != sizeSDFNeigh)
            rSolidNodalSFDneighbours.resize(sizeSDFNeigh, false);
        noalias(rSolidNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
    }
    // Ordered neighbour ids: one entry per neighbour (node itself included).
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SFD_NEIGHBOURS_ORDER))
    {
        Vector &rSolidNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
        if (rSolidNodalSFDneighboursOrder.size() != neighbourNodes)
            rSolidNodalSFDneighboursOrder.resize(neighbourNodes, false);
        noalias(rSolidNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
    }
    // Spatial deformation-rate vector, Voigt notation (sizeStrains entries).
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_SPATIAL_DEF_RATE))
    {
        Vector &rSolidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        if (rSolidSpatialDefRate.size() != sizeStrains)
            rSolidSpatialDefRate.resize(sizeStrains, false);
        noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);
    }
    // Nodal deformation gradient (dimension x dimension), zeroed for the new mesh.
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD))
    {
        Matrix &rSolidFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
        if (rSolidFgrad.size1() != dimension)
            rSolidFgrad.resize(dimension, dimension, false);
        noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);
    }
    // Rate of the nodal deformation gradient.
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_DEFORMATION_GRAD_VEL))
    {
        Matrix &rSolidFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
        if (rSolidFgradVel.size1() != dimension)
            rSolidFgradVel.resize(dimension, dimension, false);
        noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
    }
    // Scalar accumulators are reset to zero.
    if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUME))
    {
        itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
    }
    if
(itNode->SolutionStepsDataHas(SOLID_NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_VOLUMETRIC_DEF_RATE)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0; } if (itNode->SolutionStepsDataHas(SOLID_NODAL_EQUIVALENT_STRAIN_RATE)) { itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0; } } void CalcNodalStrainsAndStresses() { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); double theta = 0.5; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { if (nodalVolume > 0) { Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (interfaceFgrad.size1() != dimension) interfaceFgrad.resize(dimension, dimension, false); if (interfaceFgradVel.size1() != dimension) interfaceFgradVel.resize(dimension, dimension, false); noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension); noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension); //I have to compute the stresses and strains two times because one time is for the 
solid and the other for the fluid // Matrix interfaceFgrad=ZeroMatrix(dimension,dimension); // Matrix interfaceFgradVel=ZeroMatrix(dimension,dimension); //the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes. ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel); // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad; // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel; CalcNodalStrainsAndStressesForInterfaceFluidNode(itNode); } if (solidNodalVolume > 0) { Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS); Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD); Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL); if (solidInterfaceFgrad.size1() != dimension) solidInterfaceFgrad.resize(dimension, dimension, false); if (solidInterfaceFgradVel.size1() != dimension) solidInterfaceFgradVel.resize(dimension, dimension, false); noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension); noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension); theta = 1.0; // Matrix solidInterfaceFgrad=ZeroMatrix(dimension,dimension); // Matrix solidInterfaceFgradVel=ZeroMatrix(dimension,dimension); ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel); // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad; // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel; CalcNodalStrainsAndStressesForInterfaceSolidNode(itNode); 
}
        }
        else
        {
            // Non-interface node: a single-field computation suffices.
            if (itNode->Is(SOLID) && solidNodalVolume > 0)
            {
                // Solid node: backward-Euler kinematics (theta = 1).
                theta = 1.0;
                ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
                CalcNodalStrainsAndStressesForSolidNode(itNode);
            }
            else if (nodalVolume > 0)
            {
                // Fluid node: midpoint kinematics (theta = 0.5).
                theta = 0.5;
                this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
                this->CalcNodalStrainsAndStressesForNode(itNode);
            }
        }
        // Isolated node (no attached volume on either side): reset its
        // nodal containers instead of computing strains/stresses.
        if (nodalVolume == 0 && solidNodalVolume == 0)
        { // if nodalVolume==0
            theta = 0.5;
            this->InitializeNodalVariablesForRemeshedDomain(itNode);
            InitializeNodalVariablesForSolidRemeshedDomain(itNode);
        }
        // }
        // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
        // CopyValuesToSolidNonInterfaceNodes(itNode);
        // }
    }
    // }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}

// Copies the fluid-side nodal state (neighbour orders, deformation gradients,
// strain rates and stresses) into the corresponding SOLID_* variables of a
// solid non-interface node. NOTE(review): the only call site visible in this
// region is commented out above — confirm before removing.
void CopyValuesToSolidNonInterfaceNodes(ModelPart::NodeIterator itNode)
{
    Vector &solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    Vector &solidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    Vector &solidSpatialDefRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
    double &volumetricDefRate = itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE);
    Vector &solidCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS);
    Vector &solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS);
    // Size the solid-side containers after the fluid-side ones before copying.
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
    unsigned int sizeNodalSFDneighboursId = nodalSFDneighboursId.size();
    solidNodalSFDneighboursId.resize(sizeNodalSFDneighboursId, false);
    Vector nodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
    unsigned int sizeNodalSFDneigh = nodalSFDneigh.size();
solidNodalSFDneigh.resize(sizeNodalSFDneigh, false); solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); solidNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); solidInterfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); solidSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); volumetricDefRate = itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE); solidCauchyStress = itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS); solidDeviatoricCauchyStress = itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); } void CalcNodalStrainsAndStressesForInterfaceFluidNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double currFirstLame = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); Matrix FgradVel = 
itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); double detFgrad = 1.0; Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); if (dimension == 2) { MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2])); double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double DefVol = 
itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0); double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0); double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; // if(itNode->Is(SOLID)) // { // nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0]; // nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1]; // nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2]; // nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0]; // nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1]; // nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2]; // } itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy; } else if (dimension == 3) { 
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]); double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && 
adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]; double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0); double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0); double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0); double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]; // if(itNode->Is(SOLID)) // { // nodalSigmaTot_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[0]; // 
nodalSigmaTot_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[1]; // nodalSigmaTot_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[2]; // nodalSigmaTot_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[3]; // nodalSigmaTot_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[4]; // nodalSigmaTot_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS,1)[5]; // nodalSigmaDev_xx+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[0]; // nodalSigmaDev_yy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[1]; // nodalSigmaDev_zz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[2]; // nodalSigmaDev_xy+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[3]; // nodalSigmaDev_xz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[4]; // nodalSigmaDev_yz+=itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS,1)[5]; // } itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz; } } void 
CalcNodalStrainsAndStressesForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
    // Computes the nodal spatial deformation rate, equivalent strain rate and
    // total/deviatoric Cauchy stresses for a fluid-solid INTERFACE node, using the
    // SOLID_-prefixed nodal database (solid side of the interface).
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
    // NOTE(review): both coefficients are scaled by DELTA_TIME — presumably a
    // rate (hypoelastic) form integrated over the time step; confirm against the
    // element formulation before changing.
    double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Symmetric part of the velocity gradient, Voigt-like order [dxx, dyy, dxy].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate: sqrt(2 D:D) with the shear term counted twice
            // (hence the 4.0 factor on the off-diagonal component).
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            // Papanastasiou-regularized Bingham contribution to the apparent viscosity.
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        // Stress increments: total = lambda*tr(D)*I + 2*mu*D; deviatoric subtracts tr(D)/3.
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        if (itNode->Is(SOLID))
        {
            // For solid-flagged nodes the increment is accumulated onto the
            // previous-step stress (buffer index 1).
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
        }
        // Store the updated stresses in the current step (buffer index 0).
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // 3D symmetric deformation rate, Voigt-like order [dxx, dyy, dzz, dxy, dxz, dyz].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                        itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                        itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        if (itNode->Is(SOLID))
        {
            // Accumulate onto the previous-step stress (buffer index 1) for solid nodes.
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
            nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
            nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
            nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
            nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
        }
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
    }
}

// Computes the nodal strain rates and Cauchy stresses for a regular (non-interface)
// node of the solid domain, from the SOLID_-prefixed nodal deformation gradients.
// NOTE(review): the body duplicates the interface variant above almost verbatim;
// candidate for a shared private helper once boundaries are confirmed.
void CalcNodalStrainsAndStressesForSolidNode(ModelPart::NodeIterator itNode)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
    double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
    double currFirstLame = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio));
    double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
    Matrix Fgrad =
        itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    // if(itNode->Is(SOLID)){
    // std::cout<<"solid node"<<std::endl;
    // }
    // if(itNode->Is(FLUID)){
    // std::cout<<"FLUID node"<<std::endl;
    // }
    // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
    // std::cout<<"currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
    // }else{
    // std::cout<<"NOT INTERFACE currFirstLame "<<currFirstLame<<" deviatoricCoeff "<<deviatoricCoeff<<std::endl;
    // }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Symmetric part of the velocity gradient, order [dxx, dyy, dxy].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            // Equivalent strain rate sqrt(2 D:D); Papanastasiou regularization below.
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                      2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                      4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        // Stress increments: total = lambda*tr(D)*I + 2*mu*D; deviatoric subtracts tr(D)/3.
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        if (itNode->Is(SOLID))
        {
            // Accumulate the increment onto the previous-step stress (buffer index 1).
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
        }
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy;
    }
    else if (dimension == 3)
    {
        // 3D deformation rate, order [dxx, dyy, dzz, dxy, dxz, dyz].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
        if (yieldShear > 0)
        {
            itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
                sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                     2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                     4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
            double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
            double equivalentStrainRate = itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE);
            double exponent = -adaptiveExponent * equivalentStrainRate;
            if (equivalentStrainRate != 0)
            {
                deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
            }
            if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
            {
                // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                deviatoricCoeff = adaptiveExponent * yieldShear;
            }
        }
        double DefVol = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                        itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                        itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0);
        double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0);
        double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0);
        double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3];
        double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4];
        double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5];
        if (itNode->Is(SOLID))
        {
            // Accumulate onto the previous-step stress (buffer index 1) for solid nodes.
            nodalSigmaTot_xx += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[0];
            nodalSigmaTot_yy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[1];
            nodalSigmaTot_zz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[2];
            nodalSigmaTot_xy += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[3];
            nodalSigmaTot_xz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[4];
            nodalSigmaTot_yz += itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 1)[5];
            nodalSigmaDev_xx += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[0];
            nodalSigmaDev_yy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[1];
            nodalSigmaDev_zz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[2];
            nodalSigmaDev_xy += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[3];
            nodalSigmaDev_xz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[4];
            nodalSigmaDev_yz += itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 1)[5];
        }
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz;
        itNode->GetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz;
        itNode->GetSolutionStepValue(SOLID_NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz;
    }
}

// Computes nodal spatial deformation rate, equivalent strain rate and volumetric
// deformation rate (no stresses) for a solid node, from the SOLID_ deformation gradients.
void CalcNodalStrainsForSolidNode(ModelPart::NodeIterator itNode)
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
    // Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
    // double detFgrad=1.0;
    // Matrix InvFgrad=ZeroMatrix(dimension,dimension);
    // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension);
    double detFgrad = 1.0;
    Matrix nodalFgrad = ZeroMatrix(dimension, dimension);
    Matrix FgradVel = ZeroMatrix(dimension, dimension);
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    nodalFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    //Inverse
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad);
    }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Symmetric part of the velocity gradient, order [dxx, dyy, dxy].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // Equivalent strain rate sqrt(2 D:D); here computed unconditionally
        // (the stress variants only compute it when YIELD_SHEAR > 0).
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                  2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                  4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
        double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefVol = DefX + DefY;
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    else if (dimension == 3)
    {
        // 3D deformation rate, order [dxx, dyy, dzz, dxy, dxz, dyz].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
        double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double DefVol = DefX + DefY + DefZ;
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
}

// Same strain-rate computation as CalcNodalStrainsForSolidNode, but for an
// interface node; reads the SOLID_ deformation gradients directly (no re-zeroing).
void CalcNodalStrainsForInterfaceSolidNode(ModelPart::NodeIterator itNode)
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    Matrix Fgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
    Matrix FgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
    double detFgrad = 1.0;
    Matrix InvFgrad = ZeroMatrix(dimension, dimension);
    Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension);
    //Inverse
    if (dimension == 2)
    {
        MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad);
    }
    else if (dimension == 3)
    {
        MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad);
    }
    //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj
    SpatialVelocityGrad = prod(FgradVel, InvFgrad);
    if (dimension == 2)
    {
        // Symmetric part of the velocity gradient, order [dxx, dyy, dxy].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        // Equivalent strain rate sqrt(2 D:D), computed unconditionally here.
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt((2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                  2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                  4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2]));
        double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefVol = DefX + DefY;
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    else if (dimension == 3)
    {
        // 3D deformation rate, order [dxx, dyy, dzz, dxy, dxz, dyz].
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2);
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2));
        itNode->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) =
            sqrt(2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0] +
                 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1] +
                 2.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2] +
                 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[3] +
                 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[4] +
                 4.0 * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[5]);
        double DefX = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[0];
        double DefY = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[1];
        double DefZ = itNode->GetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE)[2];
        double DefVol = DefX + DefY + DefZ;
        itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
    }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}

// Driver: loops over all nodes and dispatches the nodal deformation-gradient and
// strain computations depending on whether the node is an interface node, a solid
// node, or a fluid node; re-initializes nodes with zero nodal volume (remeshed).
void CalcNodalStrains()
{
    /* std::cout << "Calc Nodal Strains " << std::endl; */
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
        double solidNodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
        // theta = 1.0 -> fully implicit velocity weighting in the gradient build.
        double theta = 1.0;
        if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
        {
            if (nodalVolume > 0)
            {
                //I have to compute the strains two times because one time is for the solid and the other for the fluid
                Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
                Matrix &interfaceFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
                Matrix &interfaceFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
                // Resize (without preserving contents) and zero before accumulation.
                if (interfaceFgrad.size1() != dimension)
                    interfaceFgrad.resize(dimension, dimension, false);
                if (interfaceFgradVel.size1() != dimension)
                    interfaceFgradVel.resize(dimension, dimension, false);
                noalias(interfaceFgrad) = ZeroMatrix(dimension, dimension);
                noalias(interfaceFgradVel) = ZeroMatrix(dimension, dimension);
                // Matrix interfaceFgrad = ZeroMatrix(dimension,dimension);
                // Matrix interfaceFgradVel = ZeroMatrix(dimension,dimension);
                //the following function is more expensive than the general one because there is one loop more over neighbour nodes. This is why I do it here also for fluid interface nodes.
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, nodalSFDneighboursId, rNodalSFDneigh, theta, interfaceFgrad, interfaceFgradVel);
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=interfaceFgrad;
                // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=interfaceFgradVel;
                this->CalcNodalStrainsForNode(itNode);
            }
            if (solidNodalVolume > 0)
            {
                // Solid side of the interface: same procedure on the SOLID_ variables.
                Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
                Vector rSolidNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
                Matrix &solidInterfaceFgrad = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
                Matrix &solidInterfaceFgradVel = itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
                if (solidInterfaceFgrad.size1() != dimension)
                    solidInterfaceFgrad.resize(dimension, dimension, false);
                if (solidInterfaceFgradVel.size1() != dimension)
                    solidInterfaceFgradVel.resize(dimension, dimension, false);
                noalias(solidInterfaceFgrad) = ZeroMatrix(dimension, dimension);
                noalias(solidInterfaceFgradVel) = ZeroMatrix(dimension, dimension);
                // Matrix solidInterfaceFgrad = ZeroMatrix(dimension,dimension);
                // Matrix solidInterfaceFgradVel = ZeroMatrix(dimension,dimension);
                ComputeAndStoreNodalDeformationGradientForInterfaceNode(itNode, solidNodalSFDneighboursId, rSolidNodalSFDneigh, theta, solidInterfaceFgrad, solidInterfaceFgradVel);
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD)=solidInterfaceFgrad;
                // itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL)=solidInterfaceFgradVel;
                CalcNodalStrainsForInterfaceSolidNode(itNode);
            }
        }
        else
        {
            // Non-interface nodes: solid or fluid path, chosen by flag and volume.
            if (itNode->Is(SOLID) && solidNodalVolume > 0)
            {
                ComputeAndStoreNodalDeformationGradientForSolidNode(itNode, theta);
                CalcNodalStrainsForSolidNode(itNode);
            }
            else if (nodalVolume > 0)
            {
                this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
                this->CalcNodalStrainsForNode(itNode);
            }
        }
        if (nodalVolume == 0 && solidNodalVolume == 0)
        { // if nodalVolume==0
            this->InitializeNodalVariablesForRemeshedDomain(itNode);
            InitializeNodalVariablesForSolidRemeshedDomain(itNode);
        }
        // if(itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(INTERFACE_NODE)==false){
        // CopyValuesToSolidNonInterfaceNodes(itNode);
        // }
    }
    // }
    /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
}

// Builds and stores the nodal deformation gradient F and its rate dF for a solid
// node, by accumulating shape-function-derivative (SFD) contributions of the node
// itself plus its neighbours; theta blends current/previous step velocities.
void ComputeAndStoreNodalDeformationGradientForSolidNode(ModelPart::NodeIterator itNode, double theta)
{
    KRATOS_TRY;
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
    Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
    /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
    const unsigned int neighSize = nodalSFDneighboursId.size();
    Matrix Fgrad = ZeroMatrix(dimension, dimension);
    Matrix FgradVel = ZeroMatrix(dimension, dimension);
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    if (dimension == 2)
    {
        // Own-node contribution: rNodalSFDneigh stores [dN/dx, dN/dy] pairs.
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];
        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();
        // theta-weighted velocity between current (0) and previous (1) steps.
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;
        unsigned int firstRow = 2;
        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++) //neigh_nodes has one cell less than nodalSFDneighboursId because this has also the considered node ID at the beginning
            {
                dNdXi =
rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; unsigned int neigh_nodes_id = neighb_nodes[i].Id(); unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1]; if (neigh_nodes_id != other_neigh_nodes_id) { std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id << std::endl; } Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; firstRow += 2; } } } else { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; double dNdZi = rNodalSFDneigh[2]; double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(0, 2) += dNdZi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); Fgrad(1, 2) += dNdZi * itNode->Y(); Fgrad(2, 0) += dNdXi * itNode->Z(); Fgrad(2, 1) += dNdYi * itNode->Z(); Fgrad(2, 2) += dNdZi * itNode->Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; 
FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; unsigned int firstRow = 3; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; dNdZi = rNodalSFDneigh[firstRow + 2]; VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(0, 2) += dNdZi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y(); Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z(); Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z(); Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; firstRow += 3; } } } itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD) = Fgrad; itNode->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL) = FgradVel; KRATOS_CATCH(""); } void ComputeAndStoreNodalDeformationGradientForInterfaceNode(ModelPart::NodeIterator itNode, Vector nodalSFDneighboursId, Vector rNodalSFDneigh, double theta, 
Matrix &Fgrad, Matrix &FgradVel)
{
    KRATOS_TRY;

    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
    const unsigned int neighSize = nodalSFDneighboursId.size();
    // Output matrices are reset here; the caller only has to size them.
    noalias(Fgrad) = ZeroMatrix(dimension, dimension);
    noalias(FgradVel) = ZeroMatrix(dimension, dimension);
    NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
    const unsigned int neighNodesSize = neighb_nodes.size();

    if (dimension == 2)
    {
        // Contribution of the node itself (derivatives at positions 0,1).
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];

        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();

        // theta-blended velocity between current (step 0) and previous (step 1).
        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;

        unsigned int firstRow = 2; // next unread position in rNodalSFDneigh
        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++) // neighb_nodes has one cell less than nodalSFDneighboursId because the latter also holds the considered node ID at the beginning
            {
                unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
                // Find the matching neighbour by ID; the derivative pair is
                // consumed (firstRow advanced) only when a match is found.
                for (unsigned int k = 0; k < neighNodesSize; k++)
                {
                    unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                    if (neigh_nodes_id == other_neigh_nodes_id)
                    {
                        dNdXi = rNodalSFDneigh[firstRow];
                        dNdYi = rNodalSFDneigh[firstRow + 1];

                        Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
                        Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
                        Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
                        Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();

                        VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                        VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

                        FgradVel(0, 0) += dNdXi * VelocityX;
                        FgradVel(0, 1) += dNdYi * VelocityX;
                        FgradVel(1, 0) += dNdXi * VelocityY;
                        FgradVel(1, 1) += dNdYi * VelocityY;

                        firstRow += 2;
                        break;
                    }
                }
            }
        }
    }
    else
    {
        // 3D case: three derivatives per node.
        double dNdXi = rNodalSFDneigh[0];
        double dNdYi = rNodalSFDneigh[1];
        double dNdZi = rNodalSFDneigh[2];

        double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
        double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
        double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);

        Fgrad(0, 0) += dNdXi * itNode->X();
        Fgrad(0, 1) += dNdYi * itNode->X();
        Fgrad(0, 2) += dNdZi * itNode->X();

        Fgrad(1, 0) += dNdXi * itNode->Y();
        Fgrad(1, 1) += dNdYi * itNode->Y();
        Fgrad(1, 2) += dNdZi * itNode->Y();

        Fgrad(2, 0) += dNdXi * itNode->Z();
        Fgrad(2, 1) += dNdYi * itNode->Z();
        Fgrad(2, 2) += dNdZi * itNode->Z();

        FgradVel(0, 0) += dNdXi * VelocityX;
        FgradVel(0, 1) += dNdYi * VelocityX;
        FgradVel(0, 2) += dNdZi * VelocityX;

        FgradVel(1, 0) += dNdXi * VelocityY;
        FgradVel(1, 1) += dNdYi * VelocityY;
        FgradVel(1, 2) += dNdZi * VelocityY;

        FgradVel(2, 0) += dNdXi * VelocityZ;
        FgradVel(2, 1) += dNdYi * VelocityZ;
        FgradVel(2, 2) += dNdZi * VelocityZ;

        unsigned int firstRow = 3; // next unread position in rNodalSFDneigh
        if (neighSize > 0)
        {
            for (unsigned int i = 0; i < neighSize - 1; i++)
            {
                unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
                // Find the matching neighbour by ID before consuming derivatives.
                for (unsigned int k = 0; k < neighNodesSize; k++)
                {
                    unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                    if (neigh_nodes_id == other_neigh_nodes_id)
                    {
                        dNdXi = rNodalSFDneigh[firstRow];
                        dNdYi = rNodalSFDneigh[firstRow + 1];
                        dNdZi = rNodalSFDneigh[firstRow + 2];

                        VelocityX = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
                        VelocityY = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);
                        VelocityZ = neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[k].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta);

                        Fgrad(0, 0) += dNdXi * neighb_nodes[k].X();
                        Fgrad(0, 1) += dNdYi * neighb_nodes[k].X();
                        Fgrad(0, 2) += dNdZi * neighb_nodes[k].X();

                        Fgrad(1, 0) += dNdXi * neighb_nodes[k].Y();
                        Fgrad(1, 1) += dNdYi * neighb_nodes[k].Y();
                        Fgrad(1, 2) += dNdZi * neighb_nodes[k].Y();

                        Fgrad(2, 0) += dNdXi * neighb_nodes[k].Z();
                        Fgrad(2, 1) += dNdYi * neighb_nodes[k].Z();
                        Fgrad(2, 2) += dNdZi * neighb_nodes[k].Z();

                        FgradVel(0, 0) += dNdXi * VelocityX;
                        FgradVel(0, 1) += dNdYi * VelocityX;
                        FgradVel(0, 2) += dNdZi * VelocityX;

                        FgradVel(1, 0) += dNdXi * VelocityY;
                        FgradVel(1, 1) += dNdYi * VelocityY;
                        FgradVel(1, 2) += dNdZi * VelocityY;

                        FgradVel(2, 0) += dNdXi * VelocityZ;
                        FgradVel(2, 1) += dNdYi * VelocityZ;
                        FgradVel(2, 2) += dNdZi * VelocityZ;

                        firstRow += 3;
                        break;
                    }
                }
            }
        }
    }

    // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD)=Fgrad;
    // itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL)=FgradVel;

    KRATOS_CATCH("");
}

// Updates the mesh after a solver step: computes and applies nodal
// displacements (resetting the per-node accumulators), moves the mesh, and
// recomputes the weighted boundary normals.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
    KRATOS_TRY;

    // std::cout<<" UpdateTopology ..."<<std::endl;
    /* this->CalculateDisplacements(); */
    CalculateDisplacementsAndResetNodalVariables();
    BaseType::MoveMesh();
    BoundaryNormalsCalculationUtilities BoundaryComputation;
    BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel);
    // std::cout<<" UpdateTopology DONE"<<std::endl;

    KRATOS_CATCH("");
}

// For every node: integrates the displacement with the trapezoidal rule
// (0.5*dt*(v_n + v_{n-1})) and zeroes all nodal accumulator variables for both
// the fluid and the solid domains, so the next assembly starts clean.
void CalculateDisplacementsAndResetNodalVariables()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo =
rModelPart.GetProcessInfo();
    const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    // Voigt size of the strain vector: 3 in 2D, 6 in 3D.
    unsigned int sizeStrains = 3 * (dimension - 1);

    // #pragma omp parallel
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i)
    {
        array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
        array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);

        array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
        array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);

        // Trapezoidal (Crank-Nicolson) displacement update.
        CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
        CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
        if (dimension == 3)
        {
            CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
        }

        ///// reset Nodal variables //////
        Vector &rNodalSFDneighbours = i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        unsigned int sizeSDFNeigh = rNodalSFDneighbours.size();
        // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1;
        // unsigned int sizeSDFNeigh=neighbourNodes*dimension;

        i->FastGetSolutionStepValue(NODAL_VOLUME) = 0;
        i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0;
        i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0;
        i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0;
        i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0;

        noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);

        Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
        noalias(rSpatialDefRate) = ZeroVector(sizeStrains);

        Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
        noalias(rFgrad) = ZeroMatrix(dimension, dimension);

        Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
        noalias(rFgradVel) = ZeroMatrix(dimension, dimension);

        // Solid counterparts are reset unconditionally (the interface-only
        // guard below is commented out).
        // if(i->FastGetSolutionStepValue(INTERFACE_NODE)==true){
        Vector &rSolidNodalSFDneighbours = i->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS);
        unsigned int solidSizeSDFNeigh = rSolidNodalSFDneighbours.size();
        // unsigned int solidSizeSDFNeigh=solidNeighbourNodes*dimension;

        i->FastGetSolutionStepValue(SOLID_NODAL_VOLUME) = 0;
        i->FastGetSolutionStepValue(SOLID_NODAL_MEAN_MESH_SIZE) = 0;
        i->FastGetSolutionStepValue(SOLID_NODAL_FREESURFACE_AREA) = 0;
        i->FastGetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) = 0;
        i->FastGetSolutionStepValue(SOLID_NODAL_EQUIVALENT_STRAIN_RATE) = 0;

        noalias(rSolidNodalSFDneighbours) = ZeroVector(solidSizeSDFNeigh);

        Vector &rSolidSpatialDefRate = i->FastGetSolutionStepValue(SOLID_NODAL_SPATIAL_DEF_RATE);
        noalias(rSolidSpatialDefRate) = ZeroVector(sizeStrains);

        Matrix &rSolidFgrad = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD);
        noalias(rSolidFgrad) = ZeroMatrix(dimension, dimension);

        Matrix &rSolidFgradVel = i->FastGetSolutionStepValue(SOLID_NODAL_DEFORMATION_GRAD_VEL);
        noalias(rSolidFgradVel) = ZeroMatrix(dimension, dimension);
        // }
    }
    // }
}

/// Turn back information as a string.
std::string Info() const override
{
    std::stringstream buffer;
    buffer << "NodalTwoStepVPStrategyForFSI";
    return buffer.str();
}

/// Print information about this object.
void PrintInfo(std::ostream &rOStream) const override
{
    rOStream << "NodalTwoStepVPStrategyForFSI";
}

// /// Print object's data.
// void PrintData(std::ostream& rOStream) const override
// {
// }

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected Life Cycle
///@{

///@}
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator. Private to prevent copying the strategy.
// NOTE(review): body is empty and returns nothing — UB if it were ever
// called; since it is private and unused this only suppresses copying.
// Consider `= delete` — TODO confirm against project C++ standard level.
NodalTwoStepVPStrategyForFSI &operator=(NodalTwoStepVPStrategyForFSI const &rOther)
{
}

/// Copy constructor. Private to prevent copying the strategy.
NodalTwoStepVPStrategyForFSI(NodalTwoStepVPStrategyForFSI const &rOther)
{
}

///@}

}; /// Class NodalTwoStepVPStrategyForFSI

///@}

///@name Type Definitions
///@{

///@}

///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Written in 2012 by Samuel Neves <sneves@dei.uc.pt>

   To the extent possible under law, the author(s) have dedicated all copyright
   and related and neighboring rights to this software to the public domain
   worldwide. This software is distributed without any warranty.

   You should have received a copy of the CC0 Public Domain Dedication along with
   this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

/* BLAKE2bp hashes with 4 parallel BLAKE2b leaves whose outputs are combined
   by a single root node (fanout 4, depth 2 tree mode). */
#define PARALLELISM_DEGREE 4

/* Initialize one leaf state. `offset` is the leaf index, stored in the
   node_offset parameter so each leaf hashes with a distinct parameter block. */
static int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32(&P->leaf_length, 0);
  store64(&P->node_offset, offset);
  P->node_depth = 0;                    /* leaves sit at depth 0 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  /* Leaves always emit full-length inner hashes, regardless of outlen. */
  S->outlen = P->inner_length;
  return 0;
}

/* Initialize the root state that absorbs the four leaf digests. */
static int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32(&P->leaf_length, 0);
  store64(&P->node_offset, 0);
  P->node_depth = 1;                    /* root sits at depth 1 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  blake2b_init_param( S, P );
  S->outlen = P->digest_length;
  return 0;
}

/* Initialize an unkeyed BLAKE2bp state: root + PARALLELISM_DEGREE leaves.
   Returns 0 on success, -1 on invalid outlen. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, ( uint8_t ) outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, 0, i ) < 0 ) return -1;

  /* Mark the root and the last leaf for last-node finalization. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = ( uint8_t ) outlen;
  return 0;
}

/* Keyed initialization: each leaf first absorbs one zero-padded key block. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  S->outlen = ( uint8_t ) outlen;
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb input. Data is dealt to the leaves in round-robin blocks of
   BLAKE2B_BLOCKBYTES; a partial group of blocks is buffered in S->buf. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, size_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* Flush the buffer first if the new input completes a full block group. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

  /* Each (thread or loop iteration) id__ consumes every fourth block. */
#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
#pragma omp parallel shared(S)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = ( size_t ) omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE *
BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* Buffer whatever is left over (less than one full block group). */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = ( uint32_t ) left + ( uint32_t ) inlen;
  return 0;
}

/* Finalize: flush the tail buffer into the leaves, finalize each leaf, feed
   the four leaf digests into the root, and emit the root digest.
   Note: outlen must equal the length given at init time. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  if(S->outlen != outlen) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot BLAKE2bp: hash `inlen` bytes of `in` (optionally keyed) into
   `out` (outlen bytes). Returns 0 on success, -1 on invalid parameters. */
int blake2bp( uint8_t *out, const void *in, const void *key, size_t outlen, size_t inlen, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if ( NULL == key && keylen > 0) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

  /* Round-robin the input blocks over the four leaves, then each leaf takes
     at most one final (possibly short) block from the tail. */
#if defined(_OPENMP)
  omp_set_num_threads(PARALLELISM_DEGREE);
#pragma omp parallel shared(S,hash)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = ( size_t ) omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}
update_ops_matrix_diagonal_single.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

//void single_qubit_diagonal_matrix_gate_old_single(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim);
//void single_qubit_diagonal_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim);

// Apply a single-qubit diagonal gate diag(d0, d1) to a state vector of
// dimension `dim`, dispatching to a SIMD and/or OpenMP implementation
// depending on compile flags; parallel variants are used only when
// dim >= 2^threshold (threshold = 12), where the threading overhead pays off.
void single_qubit_diagonal_matrix_gate(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	//single_qubit_diagonal_matrix_gate_old_single(target_qubit_index, diagonal_matrix, state, dim);
	//single_qubit_diagonal_matrix_gate_old_parallel(target_qubit_index, diagonal_matrix, state, dim);
	//single_qubit_diagonal_matrix_gate_single_unroll(target_qubit_index, diagonal_matrix, state, dim);
	//single_qubit_diagonal_matrix_gate_single_simd(target_qubit_index, diagonal_matrix, state, dim);
	//single_qubit_diagonal_matrix_gate_parallel_simd(target_qubit_index, diagonal_matrix, state, dim);

#ifdef _USE_SIMD
#ifdef _OPENMP
	UINT threshold = 12;
	if (dim < (((ITYPE)1) << threshold)) {
		single_qubit_diagonal_matrix_gate_single_simd(target_qubit_index, diagonal_matrix, state, dim);
	}
	else {
		single_qubit_diagonal_matrix_gate_parallel_simd(target_qubit_index, diagonal_matrix, state, dim);
	}
#else
	single_qubit_diagonal_matrix_gate_single_simd(target_qubit_index, diagonal_matrix, state, dim);
#endif
#else
#ifdef _OPENMP
	UINT threshold = 12;
	if (dim < (((ITYPE)1) << threshold)) {
		single_qubit_diagonal_matrix_gate_single_unroll(target_qubit_index, diagonal_matrix, state, dim);
	}
	else {
		single_qubit_diagonal_matrix_gate_parallel_unroll(target_qubit_index, diagonal_matrix, state, dim);
	}
#else
	single_qubit_diagonal_matrix_gate_single_unroll(target_qubit_index, diagonal_matrix, state, dim);
#endif
#endif
}

// Scalar, single-threaded implementation processing two amplitudes per
// iteration. For target qubit 0 the pair (even, odd) gets (d0, d1); for any
// other target both amplitudes in a pair share the same target-bit value.
void single_qubit_diagonal_matrix_gate_single_unroll(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
	if (target_qubit_index == 0) {
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			state[state_index] *= diagonal_matrix[0];
			state[state_index+1] *= diagonal_matrix[1];
		}
	}
	else {
		ITYPE mask = 1ULL << target_qubit_index;
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			int bitval = ((state_index&mask) != 0);
			state[state_index] *= diagonal_matrix[bitval];
			state[state_index + 1] *= diagonal_matrix[bitval];
		}
	}
}

#ifdef _OPENMP
// OpenMP-parallel version of the unrolled scalar implementation; iterations
// are independent (each touches a disjoint amplitude pair).
void single_qubit_diagonal_matrix_gate_parallel_unroll(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
	if (target_qubit_index == 0) {
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			state[state_index] *= diagonal_matrix[0];
			state[state_index + 1] *= diagonal_matrix[1];
		}
	}
	else {
		ITYPE mask = 1ULL << target_qubit_index;
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			int bitval = ((state_index&mask) != 0);
			state[state_index] *= diagonal_matrix[bitval];
			state[state_index + 1] *= diagonal_matrix[bitval];
		}
	}
}
#endif

#ifdef _USE_SIMD
// AVX implementation: two complex doubles per 256-bit register. Complex
// multiplication is done as two element-wise products followed by hadd
// (mv0 holds (re,-im) pairs, mv1 holds (im,re) pairs of the diagonal entries).
void single_qubit_diagonal_matrix_gate_single_simd(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
	if (target_qubit_index == 0) {
		// One register covers one (even, odd) amplitude pair -> d0 and d1 interleaved.
		__m256d mv0 = _mm256_set_pd(-cimag(diagonal_matrix[1]),creal(diagonal_matrix[1]),-cimag(diagonal_matrix[0]),creal(diagonal_matrix[0]));
		__m256d mv1 = _mm256_set_pd(creal(diagonal_matrix[1]), cimag(diagonal_matrix[1]), creal(diagonal_matrix[0]), cimag(diagonal_matrix[0]));
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			double* ptr = (double*)(state + state_index);
__m256d data = _mm256_loadu_pd(ptr);
			__m256d data0 = _mm256_mul_pd(data, mv0);
			__m256d data1 = _mm256_mul_pd(data, mv1);
			data = _mm256_hadd_pd(data0, data1);
			_mm256_storeu_pd(ptr, data);
		}
	}
	else {
		// Both amplitudes in a pair share the target-bit value, so per pair we
		// select either the d0 registers (mv0/mv1) or the d1 registers (mv2/mv3).
		__m256d mv0 = _mm256_set_pd(-cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]), -cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]));
		__m256d mv1 = _mm256_set_pd(creal(diagonal_matrix[0]), cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]), cimag(diagonal_matrix[0]));
		__m256d mv2 = _mm256_set_pd(-cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]), -cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]));
		__m256d mv3 = _mm256_set_pd(creal(diagonal_matrix[1]), cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]), cimag(diagonal_matrix[1]));
		//__m256i mask = _mm256_set1_epi64x(1LL<<target_qubit_index);
		ITYPE mask = 1LL << target_qubit_index;
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			double* ptr = (double*)(state + state_index);
			ITYPE flag = (state_index & mask);
			__m256d mv4 = flag ? mv2 : mv0;
			__m256d mv5 = flag ? mv3 : mv1;
			__m256d data = _mm256_loadu_pd(ptr);
			__m256d data0 = _mm256_mul_pd(data, mv4);
			__m256d data1 = _mm256_mul_pd(data, mv5);
			data = _mm256_hadd_pd(data0, data1);
			_mm256_storeu_pd(ptr, data);
		}
	}
}

#ifdef _OPENMP
// OpenMP-parallel version of the AVX implementation; same register layout and
// hadd-based complex multiply, with each iteration owning a disjoint pair.
void single_qubit_diagonal_matrix_gate_parallel_simd(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
	if (target_qubit_index == 0) {
		__m256d mv0 = _mm256_set_pd(-cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]), -cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]));
		__m256d mv1 = _mm256_set_pd(creal(diagonal_matrix[1]), cimag(diagonal_matrix[1]), creal(diagonal_matrix[0]), cimag(diagonal_matrix[0]));
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			double* ptr = (double*)(state + state_index);
			__m256d data = _mm256_loadu_pd(ptr);
			__m256d data0 = _mm256_mul_pd(data, mv0);
			__m256d data1 = _mm256_mul_pd(data, mv1);
			data = _mm256_hadd_pd(data0, data1);
			_mm256_storeu_pd(ptr, data);
		}
	}
	else {
		__m256d mv0 = _mm256_set_pd(-cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]), -cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]));
		__m256d mv1 = _mm256_set_pd(creal(diagonal_matrix[0]), cimag(diagonal_matrix[0]), creal(diagonal_matrix[0]), cimag(diagonal_matrix[0]));
		__m256d mv2 = _mm256_set_pd(-cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]), -cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]));
		__m256d mv3 = _mm256_set_pd(creal(diagonal_matrix[1]), cimag(diagonal_matrix[1]), creal(diagonal_matrix[1]), cimag(diagonal_matrix[1]));
		//__m256i mask = _mm256_set1_epi64x(1LL<<target_qubit_index);
		ITYPE mask = 1LL << target_qubit_index;
#pragma omp parallel for
		for (state_index = 0; state_index < loop_dim; state_index += 2) {
			double* ptr = (double*)(state + state_index);
			ITYPE flag = (state_index & mask);
			__m256d mv4 = flag ? mv2 : mv0;
			__m256d mv5 = flag ? mv3 : mv1;
			__m256d data = _mm256_loadu_pd(ptr);
			__m256d data0 = _mm256_mul_pd(data, mv4);
			__m256d data1 = _mm256_mul_pd(data, mv5);
			data = _mm256_hadd_pd(data0, data1);
			_mm256_storeu_pd(ptr, data);
		}
	}
}
#endif
#endif

/*
void single_qubit_diagonal_matrix_gate_old_single(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		// determin matrix pos
		UINT bit_val = (state_index >> target_qubit_index) % 2;
		// set value
		state[state_index] *= diagonal_matrix[bit_val];
	}
}

#ifdef _OPENMP
void single_qubit_diagonal_matrix_gate_old_parallel(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
#pragma omp parallel for
	for (state_index = 0; state_index < loop_dim; ++state_index) {
		// determin matrix pos
		UINT bit_val = (state_index >> target_qubit_index) % 2;
		// set value
		state[state_index] *= diagonal_matrix[bit_val];
	}
}
#endif
*/
orthoInvTrans.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>

/* Element accessors using the (legacy) direct strides interface:
   IND  -> a[i]   for a 1-D double array
   IND2 -> a[i,j] for a 2-D double array */
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
#define IND2(a,i,j) *((double *)(a->data+i*a->strides[0]+j*a->strides[1]))

static PyObject *orthoInvTrans(PyObject *self, PyObject *args, PyObject *keywds);

/* newparams = invtrans @ params (+ etc, element-wise, when the optional
   `etc` origin array is supplied).
   Returns a new 1-D double array of len(params), or NULL on error.
   BUGFIX: `etc` is optional ("OO|O") but was previously left uninitialized
   and dereferenced unconditionally -> undefined behavior (likely segfault)
   whenever the caller omitted it. It is now NULL-initialized and only added
   when provided. */
static PyObject *orthoInvTrans(PyObject *self, PyObject *args, PyObject *keywds)
{
    PyArrayObject *params, *newparams, *invtrans;
    PyArrayObject *etc = NULL;   /* optional argument: must start as NULL */
    int i, j;
    npy_intp tdims[2], pdims[1];

    static char *kwlist[] = {"params", "invtrans", "etc", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, keywds, "OO|O", kwlist,
                                     &params, &invtrans, &etc))
    {
        return NULL;
    }

    /* Transformation matrix shape and output length. */
    tdims[0] = invtrans->dimensions[0];
    tdims[1] = invtrans->dimensions[1];
    pdims[0] = params->dimensions[0];

    newparams = (PyArrayObject *) PyArray_SimpleNew(1, pdims, PyArray_DOUBLE);
    if (newparams == NULL)
        return NULL;    /* allocation failed; exception already set */

    /* Dense matrix-vector product, optionally shifted by the origin array. */
    for (i = 0; i < tdims[0]; i++)
    {
        IND(newparams, i) = 0;
        for (j = 0; j < tdims[1]; j++)
        {
            IND(newparams, i) += IND2(invtrans, i, j) * IND(params, j);
        }
        if (etc != NULL)
            IND(newparams, i) += IND(etc, i);
    }

    return PyArray_Return(newparams);
}

static char orthoInvTrans_doc[]="\
This function uses principal component analysis to modify parameter values.\n\
\n\
Parameters\n\
----------\n\
params: Array of parameters to be modified\n\
invtrans: Inverse transformation matrix, np.matrix() type\n\
origin: Array of len(params) indicating the reference frame origin\n\
\n\
Returns\n\
-------\n\
This function returns the modified parameter values\n\
\n\
Revisions\n\
---------\n\
2011-07-25  Kevin Stevenson, UCF  \n\
            kevin218@knights.ucf.edu\n\
        Original version\n\
";

static PyMethodDef orthoInvTrans_methods[] = {
    {"orthoInvTrans", (PyCFunction)orthoInvTrans, METH_VARARGS|METH_KEYWORDS, orthoInvTrans_doc},
    {NULL}};

/* Python 2 module entry point. */
void initorthoInvTrans(void)
{
    Py_InitModule("orthoInvTrans", orthoInvTrans_methods);
    import_array();   /* initialize the NumPy C-API */
}
GB_unaryop__identity_uint8_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint8_fp64
// op(A') function:  GB_tran__identity_uint8_fp64

// C type:   uint8_t
// A type:   double
// cast:     uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop:  cij = aij

// Type of the input matrix A entries.
#define GB_ATYPE \
    double

// Type of the output matrix C entries.
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is simply x after the cast below)
#define GB_OP(z, x) \
    z = x ;

// casting: double -> uint8_t with the GraphBLAS unsigned-saturation rules
#define GB_CASTING(z, x) \
    uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over the anz entries of A, parallelized with a
// static OpenMP schedule (each entry is independent).
GrB_Info GB_unop__identity_uint8_fp64
(
    uint8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c and is
// specialized here via the GB_* macros defined above.
GrB_Info GB_tran__identity_uint8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
wrapmpifftw.c
#include <stdio.h>
#include <stdlib.h>

#include <hpccmema.h>

#include "hpccfft.h"
#include "wrapmpifftw.h"

/* Maximum of three values. */
#define Mmax3( a_, b_, c_ ) ( (a_) > (b_) ? ((a_) > (c_) ? (a_) : (c_)) : ((b_) > (c_) ? (b_) : (c_)) )

/* Split the 2^a*3^b*5^c factorization of n into three per-dimension
   factorizations and return the largest of the resulting nx, ny, nz.
   Used to size the twiddle-factor tables below. */
static int
GetNXYZ(s64Int_t n, int npu) {
  int ip[3], lnx[3], lny[3], lnz[3], lnpu[3];
  int i, nx, ny, nz, nxyz;

  HPCC_factor235( npu, lnpu );
  HPCC_factor235_8( n, ip );

  for (i = 0; i < 3; ++i) {
    /* nz gets ~1/3 of each exponent (at least the npu share), nx ~half of
       the rest, ny the remainder. */
    EMAX( lnz[i], lnpu[i], (ip[i]+1)/3 );
    EMAX( lnx[i], lnpu[i], (ip[i]-lnz[i]+1)/2 );
    lny[i] = ip[i] - lnx[i] - lnz[i];
  }

  nx = HPCC_ipow( 2, lnx[0] ) * HPCC_ipow( 3, lnx[1] ) * HPCC_ipow( 5, lnx[2] );
  ny = HPCC_ipow( 2, lny[0] ) * HPCC_ipow( 3, lny[1] ) * HPCC_ipow( 5, lny[2] );
  nz = HPCC_ipow( 2, lnz[0] ) * HPCC_ipow( 3, lnz[1] ) * HPCC_ipow( 5, lnz[2] );

  nxyz = Mmax3( nx, ny, nz );

  return nxyz;
}

/* Allocate and initialize an FFTE-backed plan for a 1-D distributed FFT
   of length n over communicator comm.  Returns NULL on allocation
   failure.  The final HPCC_pzfft1d call with opt==0 performs the plan's
   table initialization (a and b stay NULL there). */
hpcc_fftw_mpi_plan
HPCC_fftw_mpi_create_plan(MPI_Comm comm, s64Int_t n, fftw_direction dir, int flags) {
  hpcc_fftw_mpi_plan p;
  fftw_complex *a = NULL, *b = NULL;
  int nxyz;
  int rank, size;

  MPI_Comm_size( comm, &size );
  MPI_Comm_rank( comm, &rank );

  p = (hpcc_fftw_mpi_plan)fftwf_malloc( sizeof *p );
  if (! p) return p;

  nxyz = GetNXYZ( n, size );

  /* Twiddle-factor tables for the x, y and z sub-transforms. */
  p->wx = (fftw_complex *)HPCC_fftw_malloc( (nxyz/2 + FFTE_NP) * (sizeof *p->wx) );
  p->wy = (fftw_complex *)HPCC_fftw_malloc( (nxyz/2 + FFTE_NP) * (sizeof *p->wy) );
  p->wz = (fftw_complex *)HPCC_fftw_malloc( (nxyz/2 + FFTE_NP) * (sizeof *p->wz) );
  p->work = (fftw_complex *)HPCC_fftw_malloc( n / size * 3 / 2 * (sizeof *p->work) );

  p->c_size = (nxyz+FFTE_NP) * (FFTE_NBLK + 1) + FFTE_NP;
#ifdef _OPENMP
  /* One scratch block per OpenMP thread; omp_get_num_threads() must be
     queried inside a parallel region to see the real team size. */
#pragma omp parallel
  {
#pragma omp single
    {
      int i;
      i = omp_get_num_threads();
      p->c = (fftw_complex *)HPCC_fftw_malloc( p->c_size * (sizeof *p->c) * i );
    }
  }
#else
  p->c = (fftw_complex *)HPCC_fftw_malloc( p->c_size * (sizeof *p->c) );
#endif

  /* Roll back everything that did get allocated on any failure. */
  if (! p->wx || ! p->wy || ! p->wz || ! p->work || ! p->c) {
    if (p->c) HPCC_fftw_free( p->c );
    if (p->work) HPCC_fftw_free( p->work );
    if (p->wz) HPCC_fftw_free( p->wz );
    if (p->wy) HPCC_fftw_free( p->wy );
    if (p->wx) HPCC_fftw_free( p->wx );
    fftwf_free( p );
    return NULL;
  }

  p->n = n;
  p->comm = comm;
  p->dir = dir;
  p->flags = flags;

  /* One MPI datatype for a complex value (two doubles). */
  MPI_Type_contiguous( 2, MPI_DOUBLE, &p->cmplx );
  MPI_Type_commit( &p->cmplx );

  if (FFTW_FORWARD == p->dir)
    p->timings = HPCC_fft_timings_forward;
  else
    p->timings = HPCC_fft_timings_backward;

  /* opt==0: initialization pass only. */
  HPCC_pzfft1d( n, a, b, p->work, rank, size, 0, p );

  return p;
}

/* Release all plan resources in reverse order of acquisition. */
void
HPCC_fftw_mpi_destroy_plan(hpcc_fftw_mpi_plan p) {
  if (!p) return;
  MPI_Type_free( &p->cmplx );
  HPCC_fftw_free( p->work );
  HPCC_fftw_free( p->c );
  HPCC_fftw_free( p->wz );
  HPCC_fftw_free( p->wy );
  HPCC_fftw_free( p->wx );
  fftwf_free( p );
}

/* Execute the planned distributed FFT; result is copied back into
   local_data.  n_fields is part of the FFTW-compatible signature but is
   not used by the FFTE backend. */
void
HPCC_fftw_mpi(hpcc_fftw_mpi_plan p, int n_fields, fftw_complex *local_data, fftw_complex *work){
  int rank, size;
  s64Int_t n;
  int i, ln;

  MPI_Comm_size( p->comm, &size );
  MPI_Comm_rank( p->comm, &rank );
  n = p->n;

  /* opt -1 = forward transform, +1 = backward. */
  if (FFTW_FORWARD == p->dir)
    HPCC_pzfft1d( n, local_data, work, p->work, rank, size, -1, p );
  else
    HPCC_pzfft1d( n, local_data, work, p->work, rank, size, +1, p );

  ln = n / size;
  for (i = 0; i < ln; ++i) {
    c_assgn( local_data[i], work[i] );
  }
}

/* Report the block distribution: every rank owns a contiguous n/size
   slice, unchanged by the transform.  Any output pointer may be NULL. */
void
HPCC_fftw_mpi_local_sizes(hpcc_fftw_mpi_plan p, s64Int_t *local_n, s64Int_t *local_start,
  s64Int_t *local_n_after_transform, s64Int_t *local_start_after_transform, s64Int_t *total_local_size) {
  int rank, size;
  s64Int_t n;
  MPI_Comm_size( p->comm, &size );
  MPI_Comm_rank( p->comm, &rank );
  n = p->n;

  if (local_n) *local_n = n / size;
  if (local_start) *local_start = n / size * rank;
  if (local_n_after_transform) *local_n_after_transform = n / size;
  if (local_start_after_transform) *local_start_after_transform = n / size * rank;
  if (total_local_size) *total_local_size = n / size;
}
zlantr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"

/***************************************************************************//**
 *
 * @ingroup plasma_lantr
 *
 *  Returns the norm of a trapezoidal or triangular matrix as
 *
 *     zlantr = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm
 *              (
 *              ( norm1(A),         NORM = PlasmaOneNorm
 *              (
 *              ( normI(A),         NORM = PlasmaInfNorm
 *              (
 *              ( normF(A),         NORM = PlasmaFrobeniusNorm
 *
 *  where norm1 denotes the one norm of a matrix (maximum column sum),
 *  normI denotes the infinity norm of a matrix (maximum row sum) and
 *  normF denotes the Frobenius norm of a matrix (square root of sum
 *  of squares). Note that max(abs(A(i,j))) is not a consistent matrix
 *  norm.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: max norm
 *          - PlasmaOneNorm: one norm
 *          - PlasmaInfNorm: infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] diag
 *          - PlasmaNonUnit: A has non-unit diagonal,
 *          - PlasmaUnit:    A has unit diagonal.
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0. When m = 0,
 *          the returned value is set to zero.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0. When n = 0,
 *          the returned value is set to zero.
 *
 * @param[in] pA
 *          The m-by-n trapezoidal matrix A.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 *******************************************************************************
 *
 * @retval double
 *         The specified norm of the trapezoidal or triangular matrix A.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zlantr
 * @sa plasma_clantr
 * @sa plasma_dlantr
 * @sa plasma_slantr
 *
 ******************************************************************************/
double plasma_zlantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                     int m, int n,
                     plasma_complex64_t *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) {
        plasma_error("illegal value of norm");
        return -1;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if ((diag != PlasmaUnit) && (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        return -3;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -4;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -5;
    }
    if (lda < imax(1, m)) {
        // NOTE: a stray debug printf of lda was removed here.
        plasma_error("illegal value of lda");
        return -7;
    }

    // quick return
    if (imin(n, m) == 0)
        return 0.0;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Allocate workspace sized for the requested norm.
    double *work = NULL;
    switch (norm) {
    case PlasmaMaxNorm:
        work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double));
        break;
    case PlasmaOneNorm:
        work = (double*)calloc(((size_t)A.mt*A.n+A.n), sizeof(double));
        break;
    case PlasmaInfNorm:
        work = (double*)calloc(((size_t)A.nt*A.m+A.m), sizeof(double));
        break;
    case PlasmaFrobeniusNorm:
        work = (double*)calloc((size_t)2*A.mt*A.nt, sizeof(double));
        break;
    }
    if (work == NULL) {
        plasma_error("malloc() failed");
        // Release the tile descriptor allocated above (was leaked before).
        plasma_desc_destroy(&A);
        return PlasmaErrorOutOfMemory;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Release workspace and descriptor (were leaked before).
        free(work);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    double value;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);

        // Call tile async function.
        plasma_omp_zlantr(norm, uplo, diag, A, work, &value,
                          sequence, &request);
    }
    // implicit synchronization

    free(work);

    // Free matrix in tile layout.
    plasma_desc_destroy(&A);

    // Destroy sequence.
    plasma_sequence_destroy(sequence);

    // Return the norm.
    return value;
}

/***************************************************************************//**
 *
 * @ingroup plasma_lantr
 *
 *  Calculates the max, one, infinity or Frobenius norm of a trapezoidal or
 *  triangular matrix.  Non-blocking equivalent of plasma_zlantr().  May
 *  return before the computation is finished.  Operates on matrices stored
 *  by tiles.  All matrices are passed through descriptors.  All dimensions
 *  are taken from the descriptors.  Allows for pipelining of operations at
 *  runtime.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] diag
 *          - PlasmaNonUnit: A has non-unit diagonal,
 *          - PlasmaUnit:    A has unit diagonal.
 *
 * @param[in] A
 *          The descriptor of matrix A.
 *
 * @param[out] work
 *          Workspace of size:
 *          - PlasmaMaxNorm:       A.mt*A.nt
 *          - PlasmaOneNorm:       A.mt*A.n + A.n
 *          - PlasmaInfNorm:       A.nt*A.m + A.m
 *          - PlasmaFrobeniusNorm: 2*A.mt*A.nt
 *
 * @param[out] value
 *          The calculated value of the norm requested.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zlantr
 * @sa plasma_omp_clantr
 * @sa plasma_omp_dlantr
 * @sa plasma_omp_slantr
 *
 ******************************************************************************/
void plasma_omp_zlantr(plasma_enum_t norm, plasma_enum_t uplo,
                       plasma_enum_t diag, plasma_desc_t A,
                       double *work, double *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((diag != PlasmaUnit) && (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid descriptor A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pzlantr(norm, uplo, diag, A, work, value, sequence, request);
}
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at the mozilla.org home page

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor,ResInnerStride>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resIncr, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    // (C = A*B row-major  <=>  C^T = B^T * A^T column-major, so swap
    // operands, swap rows/cols, and flip each operand's storage order)
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor,ResInnerStride>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resIncr,resStride,alpha,blocking,info);
  }
};

/*  Specialization for a col-major destination matrix
 *    => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs,
  int ResInnerStride>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor,ResInnerStride>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resIncr, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor,Unaligned,ResInnerStride> ResMapper;
  LhsMapper lhs(_lhs, lhsStride);
  RhsMapper rhs(_rhs, rhsStride);
  ResMapper res(_res, resStride, resIncr);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    // Threads coordinate through the shared GemmParallelInfo array using
    // busy-wait loops on info[i].users / info[i].sync (no locks).
    int tid = omp_get_thread_num();
    int threads = omp_get_num_threads();

    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}  // spin-wait: released by the atomic decrement at the end of the k loop
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      for(int shift=0; shift<threads; ++shift)
      {
        int i = (tid+shift)%threads;

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      for(Index i=0; i<threads; ++i)
#pragma omp atomic
        info[i].users -= 1;
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    // If the whole rhs fits in one nc x depth panel, pack it only once
    // (on the first i2 iteration) instead of once per lhs panel.
    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Callable object binding a GEMM call's operands and blocking; invoked
// (possibly per-slice) by parallelize_gemm.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Runs the product on the [row, row+rows) x [col, col+cols) slice of the result.
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.innerStride(), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Holds the cache blocking sizes (mc, nc, kc) and the packed-operand buffers.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fixed-size variant: buffers are statically sized (and statically
// aligned when the platform allows it), so no heap allocation occurs.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      // Round the raw char buffers up to the default alignment by hand.
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic-size variant: blocking sizes are computed at run time and the
// packed buffers are lazily heap-allocated.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // Small products (heuristic: combined size < 20) are cheaper as a
    // lazy coefficient-based product than as a full blocked GEMM.
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar,Scalar>());
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar,Scalar>());
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    // Strip transposes/conjugates/scalar multiples off the operands;
    // the extracted scalar factors are folded into actualAlpha.
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
comm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. 
+ src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type); } const NDArray& Reduce(int key, const 
std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. 
// This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], 
in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray 
merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, 
src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. 
for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = 
buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_GPU case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (auto& sorted_key_attr : sorted_key_attrs_) { const int key = std::get<0>(sorted_key_attr); const TShape& shape = std::get<1>(sorted_key_attr); const int type = std::get<2>(sorted_key_attr); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto& ctx_info_kv : ctx_info) { size_t size = ctx_info_kv.second.second; if (size <= min_size) { ctx = ctx_info_kv.second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays 
if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_GPU std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store; for (int i = 0; i < n; ++i) { device_store.SetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; hipDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { hipError_t e = hipDeviceEnablePeerAccess(gpus[j], 0); if (e == hipSuccess || e == hipErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
inputBug330-1.c
/* test preprocessing info before and after a statement */ int main (int argc,char** argv) { int i; #if defined(_OPENMP) #pragma omp master { i++; } #endif return 0; }
scaling_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_SCALING_SOLVER_H_INCLUDED ) #define KRATOS_SCALING_SOLVER_H_INCLUDED // System includes #include <cmath> #include <complex> // External includes // Project includes #include "includes/define.h" #include "factories/linear_solver_factory.h" #include "linear_solvers/linear_solver.h" #include "utilities/openmp_utils.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ScalingSolver * @ingroup KratosCore * @brief This solvers rescales in order to improve the conditioning of the system * @details Rescales the matrix, and uses a given linear solver * @author Riccardo Rossi * @tparam TSparseSpaceType The sparse space definition * @tparam TDenseSpaceType The dense space definition * @tparam TReordererType The reorder considered */ template<class TSparseSpaceType, class TDenseSpaceType, class TReordererType = Reorderer<TSparseSpaceType, TDenseSpaceType> > class ScalingSolver : public LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> { public: ///@name Type Definitions ///@{ /// Pointer definition of ScalingSolver KRATOS_CLASS_POINTER_DEFINITION(ScalingSolver); /// Definition of the base type typedef LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType> BaseType; /// The definition of the spaces (sparse matrix) typedef typename TSparseSpaceType::MatrixType SparseMatrixType; /// The definition of the spaces (vector) typedef typename TSparseSpaceType::VectorType VectorType; /// The definition of the spaces (dense matrix) typedef typename TDenseSpaceType::MatrixType DenseMatrixType; /// The definition of the linear solver factory type typedef 
LinearSolverFactory<TSparseSpaceType,TDenseSpaceType> LinearSolverFactoryType; /// The index type definition to be consistent typedef typename TSparseSpaceType::IndexType IndexType; ///@} ///@name Life Cycle ///@{ /// Default constructor. ScalingSolver() { } /** * @brief Constructor without parameters * @param pLinearSolver The linear solver to be scaled * @param SymmetricScaling If the scaling is symmetric (true by default) */ ScalingSolver( typename BaseType::Pointer pLinearSolver, const bool SymmetricScaling = true ) : BaseType (), mpLinearSolver(pLinearSolver), mSymmetricScaling(SymmetricScaling) { } /** * @brief Constructor with parameters * @param ThisParameters The configuration parameters of the linear solver */ ScalingSolver(Parameters ThisParameters) : BaseType () { KRATOS_TRY KRATOS_ERROR_IF_NOT(ThisParameters.Has("solver_type")) << "Solver_type must be specified to construct the ScalingSolver" << std::endl; mpLinearSolver = LinearSolverFactoryType().Create(ThisParameters); mSymmetricScaling = ThisParameters.Has("symmetric_scaling") ? ThisParameters["symmetric_scaling"].GetBool() : true; KRATOS_CATCH("") } /// Copy constructor. ScalingSolver(const ScalingSolver& Other) : BaseType(Other) {} /// Destructor. ~ScalingSolver() override {} ///@} ///@name Operators ///@{ /// Assignment operator. ScalingSolver& operator=(const ScalingSolver& Other) { BaseType::operator=(Other); return *this; } ///@} ///@name Operations ///@{ /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example * when solving a mixed u-p problem, it is important to identify the row associated to v and p. * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers * which require knowledge on the spatial position of the nodes associated to a given dof. 
* This function tells if the solver requires such data */ bool AdditionalPhysicalDataIsNeeded() override { return mpLinearSolver->AdditionalPhysicalDataIsNeeded(); } /** Some solvers may require a minimum degree of knowledge of the structure of the matrix. To make an example * when solving a mixed u-p problem, it is important to identify the row associated to v and p. * another example is the automatic prescription of rotation null-space for smoothed-aggregation solvers * which require knowledge on the spatial position of the nodes associated to a given dof. * This function is the place to eventually provide such data */ void ProvideAdditionalData( SparseMatrixType& rA, VectorType& rX, VectorType& rB, typename ModelPart::DofsArrayType& rdof_set, ModelPart& r_model_part ) override { mpLinearSolver->ProvideAdditionalData(rA,rX,rB,rdof_set,r_model_part); } void InitializeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { mpLinearSolver->InitializeSolutionStep(rA,rX,rB); } /** This function is designed to be called at the end of the solve step. * for example this is the place to remove any data that we do not want to save for later @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. */ void FinalizeSolutionStep (SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { mpLinearSolver->FinalizeSolutionStep(rA,rX,rB); } /** This function is designed to clean up all internal data in the solver. * Clear is designed to leave the solver object as if newly created. * After a clear a new Initialize is needed */ void Clear() override { mpLinearSolver->Clear(); } /** Normal solve method. Solves the linear system Ax=b and puts the result on SystemVector& rX. rX is also th initial guess for iterative methods. @param rA. System matrix @param rX. Solution vector. it's also the initial guess for iterative linear solvers. @param rB. Right hand side vector. 
*/ bool Solve(SparseMatrixType& rA, VectorType& rX, VectorType& rB) override { if(this->IsNotConsistent(rA, rX, rB)) return false; VectorType scaling_vector(rX.size()); //obtain the scaling matrix GetScalingWeights(rA,scaling_vector); //scale system if(mSymmetricScaling == false) { KRATOS_THROW_ERROR(std::logic_error,"not yet implemented","") } else { #pragma omp parallel for for(int i=0; i< static_cast<int>(scaling_vector.size()); i++) scaling_vector[i] = sqrt(std::abs(scaling_vector[i])); SymmetricScaling(rA,scaling_vector); } //scale RHS #pragma omp parallel for for(int i=0; i< static_cast<int>(scaling_vector.size()); i++) rB[i] /= scaling_vector[i]; //solve the problem bool is_solved = mpLinearSolver->Solve(rA,rX,rB); //backscale the solution if(mSymmetricScaling == true) { #pragma omp parallel for for(int i=0; i< static_cast<int>(scaling_vector.size()); i++) rX[i] /= scaling_vector[i]; } return is_solved; } ///@} ///@name Access ///@{ IndexType GetIterationsNumber() override { return mpLinearSolver->GetIterationsNumber(); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "Composite Linear Solver. Uses internally the following linear solver " << mpLinearSolver->Info(); return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override
{
    BaseType::PrintData(rOStream);
}

///@}
///@name Friends
///@{
///@}

protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}

private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{

// The solver actually performing the solve on the scaled system.
typename LinearSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>::Pointer mpLinearSolver;
// If true, A is scaled symmetrically (D^{-1/2} A D^{-1/2}); the false branch is unimplemented.
bool mSymmetricScaling;

///@}
///@name Private Operators
///@{

// In-place symmetric scaling of the CSR matrix A by the weight vector aux:
// each stored entry A(i,j) is divided by aux[i]*aux[j]. Rows are split into
// per-thread partitions and each thread walks its slice of the raw uBLAS
// index/value arrays directly.
static void SymmetricScaling( SparseMatrixType& A, const VectorType& aux)
{
    //typedef unsigned int size_type;
    //typedef double value_type;

    //create partition
    OpenMPUtils::PartitionVector partition;
    int number_of_threads = OpenMPUtils::GetNumThreads();
    OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);
    //parallel loop
    #pragma omp parallel
    {
        int thread_id = OpenMPUtils::ThisThread();
        int number_of_rows = partition[thread_id+1] - partition[thread_id];
        // row_iter_begin points into the CSR row-pointer array at this thread's first row;
        // *row_iter_begin is then the offset of that row's first entry in the column/value arrays.
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin = A.value_data().begin()+*row_iter_begin;

        perform_matrix_scaling( number_of_rows,
                                row_iter_begin,
                                index_2_begin,
                                value_begin,
                                partition[thread_id],
                                aux
                              );
    }
}

/**
 * Scales one contiguous block of CSR rows in place:
 * every stored entry A(i,j) in the block is divided by weights[i]*weights[j].
 * row_begin/index2_begin/value_begin are raw iterators into the CSR arrays,
 * already advanced to the block start; output_begin_index is the global index
 * of the block's first row (used to address weights).
 */
static void perform_matrix_scaling(
    int number_of_rows,
    typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator row_begin,
    typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::iterator index2_begin,
    typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::iterator value_begin,
    unsigned int output_begin_index,
    const VectorType& weights
)
{
    int row_size;
    typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
    int kkk = output_begin_index;
    for(int k = 0; k < number_of_rows; k++)
    {
        // number of stored entries in this row = difference of consecutive row pointers
        row_size= *(row_it+1)-*row_it;
        row_it++;
        const typename TDenseSpaceType::DataType row_weight = weights[kkk++];
        for(int i = 0; i<row_size; i++)
        {
            const typename TDenseSpaceType::DataType col_weight = weights[*index2_begin];
            typename TDenseSpaceType::DataType t = (*value_begin);
            t /= (row_weight*col_weight);
            (*value_begin) = t; // write back scaled value (original author note: "check if this is correct")
            value_begin++;
            index2_begin++;
        }
    }
}

// Computes one scaling weight per row of A into aux, in parallel over
// per-thread row partitions (same traversal scheme as SymmetricScaling,
// but read-only on A).
static void GetScalingWeights( const SparseMatrixType& A, VectorType& aux)
{
    //typedef unsigned int size_type;
    //typedef double value_type;

    //create partition
    OpenMPUtils::PartitionVector partition;
    int number_of_threads = OpenMPUtils::GetNumThreads();
    OpenMPUtils::DivideInPartitions(A.size1(),number_of_threads, partition);
    //parallel loop
    #pragma omp parallel
    {
        int thread_id = OpenMPUtils::ThisThread();
        int number_of_rows = partition[thread_id+1] - partition[thread_id];
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_iter_begin = A.index1_data().begin()+partition[thread_id];
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index_2_begin = A.index2_data().begin()+*row_iter_begin;
        typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin = A.value_data().begin()+*row_iter_begin;

        GS2weights( number_of_rows,
                    row_iter_begin,
                    index_2_begin,
                    value_begin,
                    partition[thread_id],
                    aux
                  );
    }
}

/**
 * Computes the per-row weight for one block of CSR rows:
 * weights[i] = sqrt( sum_j A(i,j)^2 ), i.e. the Euclidean norm of row i.
 * Iterators are raw CSR iterators advanced to the block start;
 * output_begin_index is the global index of the first row in the block.
 */
static void GS2weights(
    int number_of_rows,
    typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator row_begin,
    typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::index_array_type::const_iterator index2_begin,
    typename boost::numeric::ublas::compressed_matrix<typename TDenseSpaceType::DataType>::value_array_type::const_iterator value_begin,
    unsigned int output_begin_index,
    VectorType& weights
)
{
    int row_size;
    typename SparseMatrixType::index_array_type::const_iterator row_it = row_begin;
    int kkk = output_begin_index;
    for(int k = 0; k < number_of_rows; k++)
    {
        row_size= *(row_it+1)-*row_it;
        row_it++;
        double t = 0.0;
        for(int i = 0; i<row_size; i++)
        {
            double tmp = std::abs(*value_begin);
            t += tmp*tmp;
            value_begin++;
        }
        t = sqrt(t); // 2-norm of the row
        weights[kkk++] = t;
    }
}

///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}

}; // Class ScalingSolver

///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{

/// input stream function
// NOTE(review): TPreconditionerType is declared but unused by ScalingSolver's
// three template arguments — presumably kept for signature symmetry with other solvers.
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::istream& operator >> (std::istream& IStream, ScalingSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>& rThis)
{
    return IStream;
}

/// output stream function
template<class TSparseSpaceType, class TDenseSpaceType, class TPreconditionerType, class TReordererType>
inline std::ostream& operator << (std::ostream& OStream, const ScalingSolver<TSparseSpaceType, TDenseSpaceType, TReordererType>& rThis)
{
    rThis.PrintInfo(OStream);
    OStream << std::endl;
    rThis.PrintData(OStream);
    return OStream;
}
///@}

} // namespace Kratos.

#endif // KRATOS_SCALING_SOLVER_H_INCLUDED defined
diskEx.h
#pragma once class DiskProperty { public: DiskProperty(); ~DiskProperty(); void Information(); void calcSigmaGas(PS::F64 rxy); void calcSigmaGas_up(PS::F64 rxy); void calcSigmaGas_fgap(PS::F64 mp_ms, PS::F64 r_h, PS::F64 alpha_vis); void calcMidplaneTemperture(PS::F64 rxy); void calcSoundSpeed(PS::F64 Tmid); void calcScaleHeight(PS::F64 csound, PS::F64 rxy, PS::F64 Msun); void calcrhoGas(PS::F64 SigmaGas, PS::F64 hscale, PS::F64 z2); void calcUvel(PS::F64 hscale_rxy_2,PS::F64 rz_hscale_2,PS::F64 alpha,PS::F64 beta,PS::F64vec pvel,PS::F64vec vkep); static PS::S32 DiskStg; static PS::F64 SigmaGas0_cgs; static PS::F64 SigmaGas0; static PS::F64 Tmid0_K; static PS::F64 Tmid0; static PS::F64 alpha_gas; static PS::F64 beta_gas; static PS::F64 Cd; static PS::F64 mu; static PS::F64 alpha_vis; // Disk parameters PS::F64 SigmaGas; PS::F64 SigmaGas_up; PS::F64 SigmaGas_fgap; PS::F64 rhoGas; PS::F64 Tmid; PS::F64 hscale; PS::F64 csound; PS::F64vec uvel; PS::F64vec vgas; PS::F64 alpha_gas_c; PS::F64 beta_gas_c; PS::F64 eta_gas_c; private: static constexpr PS::F64 m_pi = 3.141592653589793238462643383279502884L; static constexpr PS::F64 L_MKS = 149597870700; static constexpr PS::F64 L_CGS = 14959787070000; static constexpr PS::F64 M_MKS = 1.9884e30; static constexpr PS::F64 M_CGS = 1.9884e33; static constexpr PS::F64 T = 365.25*24.*60.*60./(2.*m_pi); static constexpr PS::F64 k_B = 1.380649e-16 /(M_CGS*L_CGS*L_CGS)*T*T; static constexpr PS::F64 N_A = 6.022140857e23; static constexpr PS::F64 m_H = 1./N_A /M_CGS; }; DiskProperty::DiskProperty() { } DiskProperty::~DiskProperty() { } void DiskProperty::Information() { std::cout << std::endl; std::cout << "--------------------------------" << std::endl; std::cout << "SigmaGas0_cgs = " << SigmaGas0_cgs << std::endl; std::cout << "Tmid0_K = " << Tmid0_K << std::endl; std::cout << "alpha_gas = " << alpha_gas << std::endl; std::cout << "beta_gas = " << beta_gas << std::endl; std::cout << "Cd = " << Cd << std::endl; std::cout << "mu = " << 
mu << std::endl; std::cout << "--------------------------------" << std::endl; std::cout << std::endl; std::cout << std::endl; } void DiskProperty::calcSigmaGas(PS::F64 rxy) { SigmaGas = SigmaGas0 *pow(rxy, -alpha_gas); } void DiskProperty::calcSigmaGas_up(PS::F64 rxy) { SigmaGas_up = SigmaGas0 *pow(rxy, -alpha_gas); } void DiskProperty::calcSigmaGas_fgap(PS::F64 mp_ms, PS::F64 r_h, PS::F64 alpha_vis) { PS::F64 K=mp_ms*mp_ms *r_h*r_h*r_h*r_h*r_h /alpha_vis; SigmaGas_fgap = 1./(1.+0.04*K); } void DiskProperty::calcMidplaneTemperture(PS::F64 rxy) { Tmid = Tmid0 *pow(rxy, -beta_gas); } void DiskProperty::calcSoundSpeed(PS::F64 Tmid) { csound = sqrt(k_B * Tmid / (mu * m_H)); } void DiskProperty::calcScaleHeight(PS::F64 csound, PS::F64 rxy, PS::F64 Msun) { hscale = csound*sqrt(rxy*rxy*rxy/Msun); } void DiskProperty::calcrhoGas( PS::F64 SigmaGas, PS::F64 hscale, PS::F64 z2 ) { rhoGas = SigmaGas/(sqrt(2.*m_pi)*hscale) *exp(-z2/(2.*hscale*hscale)); } void DiskProperty::calcUvel( PS::F64 hscale_rxy_2, PS::F64 rz_hscale_2, PS::F64 alpha, PS::F64 beta, PS::F64vec pvel, PS::F64vec vkep ) { eta_gas_c = 0.5e0 *hscale_rxy_2 *(1.5e0*(1.-rz_hscale_2) +alpha +beta *(1. 
+rz_hscale_2)); vgas = (1.0 - eta_gas_c)*vkep; uvel = pvel - vgas; } PS::S32 DiskProperty::DiskStg = 0; PS::F64 DiskProperty::SigmaGas0_cgs = 1.7e3; PS::F64 DiskProperty::SigmaGas0 = 1.7e3*(1./DiskProperty::M_CGS)/pow(1./DiskProperty::L_CGS, 2.); PS::F64 DiskProperty::Tmid0_K = 280.; PS::F64 DiskProperty::Tmid0 = Tmid0_K; PS::F64 DiskProperty::alpha_gas = 1.5e0; PS::F64 DiskProperty::beta_gas = 0.5e0; PS::F64 DiskProperty::Cd = 1.e0; PS::F64 DiskProperty::mu = 1.e0; PS::F64 DiskProperty::alpha_vis = 1.e-3; class DragForce { public: DragForce(); ~DragForce(); // Aerodynamic Drags PS::F64vec calcAerodynamicGasDrag_Adachi1976(PS::F64vec u, PS::F64 mass, PS::F64 radius, PS::F64 rho_gas, PS::F64 coeff); // Tidal Drags PS::F64vec calcTidalDragAxi_usingTimescale(PS::F64vec vp, PS::F64 tau_axi); PS::F64vec calcTidalDragEcc_usingTimescale(PS::F64vec rp, PS::F64vec vp, PS::F64 tau_ecc); PS::F64vec calcTidalDragInc_usingTimescale(PS::F64 vz, PS::F64 tau_inc); // Timescales PS::F64 calc_TauTideKanagawa2018(PS::F64 m, PS::F64 ms, PS::F64 r, PS::F64 Sigma, PS::F64 hs); PS::F64 calc_TauWave_Tanaka2002(PS::F64 m, PS::F64 ms, PS::F64 r, PS::F64 Sigma, PS::F64 hs); PS::F64 calc_TauTideAxi_Cresswell2008(PS::F64 twave, PS::F64 r_h, PS::F64 e, PS::F64 i, PS::F64 alpha); PS::F64 calc_TauTideEcc_Cresswell2008(PS::F64 twave, PS::F64 r_h, PS::F64 e, PS::F64 i); PS::F64 calc_TauTideInc_Cresswell2008(PS::F64 twave, PS::F64 r_h, PS::F64 e, PS::F64 i); template <class Tpsys> void calcGasDrag(Tpsys & pp, PS::F64 time); static PS::S32 setting; private: static constexpr PS::F64 m_pi = 3.141592653589793238462643383279502884L; static constexpr PS::F64 L_MKS = 149597870700; static constexpr PS::F64 L_CGS = 14959787070000; static constexpr PS::F64 M_MKS = 1.9884e30; static constexpr PS::F64 M_CGS = 1.9884e33; static constexpr PS::F64 T = 365.25*24.*60.*60./(2.*m_pi); static constexpr PS::F64 c_mig = 2.; }; DragForce::DragForce() { if ( PS::Comm::getRank() == 0 ) { std::cout << std::endl; std::cout << 
std::endl; std::cout << "--------------------------------" << std::endl; std::cout << "Use DragForce class: " << std::endl << "Modle id setting is set as " << setting << std::endl << std::endl; std::cout << "--------------------------------" << std::endl; std::cout << std::endl; std::cout << std::endl; } } DragForce::~DragForce() { } template <class Tpsys> void DragForce::calcGasDrag(Tpsys & pp, PS::F64 time) { const PS::S32 n_loc = pp.getNumberOfParticleLocal(); // Number of Particles in one MPI process #pragma omp parallel for for(PS::S32 i=0; i<n_loc; i++){ // PS::F64vec fdrag_aero=0.; PS::F64vec fdrag_tide=0.; PS::F64vec fdrag=0.; // Calculate position in the protoplanetary disk PS::F64 rxy2 = pp[i].pos.x*pp[i].pos.x + pp[i].pos.y*pp[i].pos.y; PS::F64 rxy_inv = 1./sqrt(rxy2); PS::F64 rxy = rxy2 * rxy_inv; PS::F64 rz2 = pp[i].pos.z*pp[i].pos.z; PS::F64 rz = sqrt(rz2); // generate DiskProperty object DiskProperty disk; disk.calcMidplaneTemperture(rxy); disk.calcSoundSpeed(disk.Tmid); disk.calcScaleHeight(disk.csound,rxy,FP_t::m_sun); // FP_t::m_sun is the mass of central star disk.calcSigmaGas_up(rxy); // switch (setting) { case 0: // aerodynamic gas drag { disk.SigmaGas = disk.SigmaGas_up; disk.calcrhoGas(disk.SigmaGas,disk.hscale,rz2); // get disk gas velocity disk.alpha_gas_c = disk.alpha_gas; disk.beta_gas_c = disk.beta_gas; PS::F64 hscae2 = disk.hscale*disk.hscale; PS::F64 hscale_rxy_2 = hscae2 *rxy_inv *rxy_inv; PS::F64 rz_hscale_2 = rz2/hscae2; PS::F64vec ev(-pp[i].pos.y*rxy_inv, pp[i].pos.x*rxy_inv, 0.0); PS::F64vec vkep = sqrt(FP_t::m_sun * rxy_inv) * ev; // FP_t::m_sun is the mass of central star disk.calcUvel(hscale_rxy_2,rz_hscale_2,disk.alpha_gas_c,disk.beta_gas_c,pp[i].vel,vkep); // fdrag_aero = calcAerodynamicGasDrag_Adachi1976(disk.uvel,pp[i].mass,pp[i].r_planet,disk.rhoGas,0.5e0*disk.Cd*m_pi); fdrag = fdrag_aero; break; } case 1: // tidal gas drag by Kanagawa+2018 { // surface density at gap bottom PS::F64 m_ms = pp[i].mass/FP_t::m_sun; PS::F64 
r_hscale = rxy/disk.hscale; disk.calcSigmaGas_fgap(m_ms, r_hscale, disk.alpha_vis); disk.SigmaGas = disk.SigmaGas_fgap *disk.SigmaGas_up; // PS::F64 tau_tide_axi = calc_TauTideKanagawa2018(pp[i].mass, FP_t::m_sun, rxy, disk.SigmaGas, disk.hscale); // std::cout << tau_tide_axi << std::endl; fdrag_aero = 0.; fdrag_tide += calcTidalDragAxi_usingTimescale(pp[i].vel,tau_tide_axi); fdrag = fdrag_tide; break; } case 2: // tidal gas drag by Tanaka+2002, Tanaka & Ward 2004, Cresswell & Nelson 2008 with reduction by Kanagawa+2018 { // surface density at gap bottom PS::F64 m_ms = pp[i].mass/FP_t::m_sun; PS::F64 r_hscale = rxy/disk.hscale; disk.calcSigmaGas_fgap(m_ms, r_hscale, disk.alpha_vis); disk.SigmaGas = disk.SigmaGas_fgap *disk.SigmaGas_up; disk.alpha_gas_c = disk.alpha_gas; // PS::F64 tau_wave = calc_TauWave_Tanaka2002(pp[i].mass, FP_t::m_sun, rxy, disk.SigmaGas, disk.hscale); PS::F64 tau_tide_axi = calc_TauTideAxi_Cresswell2008(tau_wave, r_hscale, pp[i].getEccentricity(), pp[i].getInclination(), disk.alpha_gas_c); PS::F64 tau_tide_ecc = calc_TauTideEcc_Cresswell2008(tau_wave, r_hscale, pp[i].getEccentricity(), pp[i].getInclination()); PS::F64 tau_tide_inc = calc_TauTideInc_Cresswell2008(tau_wave, r_hscale, pp[i].getEccentricity(), pp[i].getInclination()); fdrag_tide += calcTidalDragAxi_usingTimescale(pp[i].vel,tau_tide_axi); fdrag_tide += calcTidalDragEcc_usingTimescale(pp[i].pos, pp[i].vel, tau_tide_ecc); fdrag_tide += calcTidalDragInc_usingTimescale(pp[i].vel.z, tau_tide_inc); fdrag = fdrag_tide; break; } default: { break; } } pp[i].acc_gd = 0.; pp[i].acc_gd += fdrag; pp[i].acc += pp[i].acc_gd; } } PS::F64vec DragForce::calcAerodynamicGasDrag_Adachi1976 ( PS::F64vec u, PS::F64 mass, PS::F64 radius, PS::F64 rho_gas, PS::F64 coeff // pi * Cd / 2 ) { return -coeff *radius *radius *rho_gas *sqrt(u*u) *u /mass; } PS::F64vec DragForce::calcTidalDragAxi_usingTimescale ( PS::F64vec vp, PS::F64 tau_axi ) { return -0.5*vp/tau_axi; } PS::F64vec 
DragForce::calcTidalDragEcc_usingTimescale ( PS::F64vec rp, PS::F64vec vp, PS::F64 tau_ecc ) { return -2.*(vp*rp)*rp/((rp*rp)*tau_ecc); } PS::F64vec DragForce::calcTidalDragInc_usingTimescale ( PS::F64 vz, PS::F64 tau_inc ) { PS::F64vec veck =0.; veck.z=1.; return -vz/tau_inc *veck ; } PS::F64 DragForce::calc_TauTideKanagawa2018(PS::F64 m, PS::F64 ms, PS::F64 r, PS::F64 Sigma, PS::F64 hs) { return ( pow(ms,1.5)*hs*hs )/(2.*c_mig*m*Sigma*pow(r,2.5)); } PS::F64 DragForce::calc_TauWave_Tanaka2002(PS::F64 m, PS::F64 ms, PS::F64 r, PS::F64 Sigma, PS::F64 hs) { return ( pow(ms,1.5)*hs*hs*hs*hs )/(m*Sigma*pow(r,4.5)); } PS::F64 DragForce::calc_TauTideAxi_Cresswell2008(PS::F64 twave, PS::F64 r_h, PS::F64 e, PS::F64 i, PS::F64 alpha) { PS::F64 e_rh = e*r_h; PS::F64 i_rh = i*r_h; PS::F64 Pe = (1.+pow(0.444*e_rh,1.2) +pow(0.352*e_rh,6.))/(1.-pow(0.495*e_rh,4.)); // Note: factor 2 is not in this equation. In calcDrag return twave/(2.7+1.1*alpha) *r_h*r_h *( Pe+Pe/abs(Pe)* ( 0.070*i_rh +0.085*i_rh*i_rh*i_rh*i_rh -0.080*e_rh*i_rh*i_rh ) ); } PS::F64 DragForce::calc_TauTideEcc_Cresswell2008(PS::F64 twave, PS::F64 r_h, PS::F64 e, PS::F64 i) { PS::F64 e_rh = e*r_h; PS::F64 i_rh = i*r_h; return 1.28*twave *( 1.-0.14*e_rh*e_rh +0.06*e_rh*e_rh*e_rh +0.18*e_rh*i_rh*i_rh ); } PS::F64 DragForce::calc_TauTideInc_Cresswell2008(PS::F64 twave, PS::F64 r_h, PS::F64 e, PS::F64 i) { PS::F64 e_rh = e*r_h; PS::F64 i_rh = i*r_h; return 1.84*twave *( 1.-0.30*i_rh*i_rh +0.24*i_rh*i_rh*i_rh +0.14*e_rh*e_rh*i_rh ); } PS::S32 DragForce::setting = 0;
strm.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <stdio.h> #include <unistd.h> #include <math.h> #include <float.h> #include <limits.h> /*#include <sys/time.h>*/ // Example main arguments // #define MARGS "" #include "lime.h" /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE #define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 #define NTIMES 10 #endif #endif #ifndef NTIMES #define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET #define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
* *-----------------------------------------------------------------------*/
#define HLINE "-------------------------------------------------------------\n"

#ifndef MIN
#define MIN(x,y) ((x)<(y)?(x):(y))
#endif
#ifndef MAX
#define MAX(x,y) ((x)>(y)?(x):(y))
#endif

#ifndef STREAM_TYPE
#define STREAM_TYPE double
#endif

/* DYN: allocate the three work arrays at run time (via LIME's NALLOC)
   instead of as static arrays. */
#ifdef DYN
static STREAM_TYPE *a, *b, *c;
#else
static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET];
#endif

/* Per-kernel timing accumulators (Copy, Scale, Add, Triad). */
static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};

static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "};

/* Bytes moved per kernel iteration: 2 arrays for Copy/Scale, 3 for Add/Triad. */
static double bytes[4] = {
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE,
    3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE
};

extern int checktick();
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(STREAM_TYPE scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(STREAM_TYPE scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif

/* Benchmark driver. MAIN is presumably a macro from lime.h wrapping the
   program entry point — confirm against that header. */
int MAIN(int argc, char *argv[])
{
    int BytesPerWord;
    int k;
    ssize_t j;
    STREAM_TYPE scalar;
    double times[4][NTIMES];

    /* --- SETUP --- determine precision and check timing --- */
    printf(HLINE);
    printf("STREAM version $Revision: 5.10 $\n");
    printf(HLINE);
    BytesPerWord = sizeof(STREAM_TYPE);
    printf("This system uses %d bytes per array element.\n", BytesPerWord);
    printf(HLINE);
#ifdef N
    printf("***** WARNING: ******\n");
    printf(" It appears that you set the preprocessor variable N when compiling this code.\n");
    printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n");
    printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE);
    printf("***** WARNING: ******\n");
#endif
#ifdef DYN
    /* NALLOC comes from lime.h — presumably a checked allocator; confirm. */
    a = NALLOC(STREAM_TYPE, STREAM_ARRAY_SIZE+OFFSET);
    b = NALLOC(STREAM_TYPE, STREAM_ARRAY_SIZE+OFFSET);
    c = NALLOC(STREAM_TYPE, STREAM_ARRAY_SIZE+OFFSET);
#endif
    printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET);
    printf("Array addr %p %p %p\n", a, b, c);
    printf("Memory per array = %.1f MiB (= %.1f GiB).\n",
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0),
        BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0));
    printf("Total memory required = %.1f MiB (= %.1f GiB).\n",
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.),
        (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.));
    printf("Each kernel will be executed %d times.\n", NTIMES);
    printf(" The *best* time for each kernel (excluding the first iteration)\n");
    printf(" will be used to compute the reported bandwidth.\n");

#ifdef _OPENMP
    printf(HLINE);
#pragma omp parallel
    {
#pragma omp master
        {
            k = omp_get_num_threads();
            printf ("Number of Threads requested = %i\n",k);
        }
    }
#endif

#ifdef _OPENMP
    /* Count the threads that actually run (atomic increment per thread). */
    k = 0;
#pragma omp parallel
#pragma omp atomic
    k++;
    printf ("Number of Threads counted = %i\n",k);
#endif

    /* Get initial value for system clock. */
    /* First-touch initialization in parallel, so pages land on the NUMA
       node of the thread that will use them. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        a[j] = 1.0;
        b[j] = 2.0;
        c[j] = 0.0;
    }

    printf(HLINE);

#ifdef TIME_CHECK
    {
        int quantum;
        double t;
        if ((quantum = checktick()) >= 1)
            printf("Your clock granularity/precision appears to be "
                "%d microseconds.\n", quantum);
        else {
            printf("Your clock granularity appears to be "
                "less than one microsecond.\n");
            quantum = 1;
        }
        t = mysecond();
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (j = 0; j < STREAM_ARRAY_SIZE; j++)
            a[j] = 2.0E0 * a[j];
        t = 1.0E6 * (mysecond() - t);

        printf("Each test below will take on the order"
            " of %d microseconds.\n", (int) t );
        printf(" (= %d clock ticks)\n", (int) (t/quantum) );
        printf("Increase the size of the arrays if this shows that\n");
        printf("you are not getting at least 20 clock ticks per test.\n");
        printf(HLINE);
        printf("WARNING -- The above is only a rough guideline.\n");
        printf("For best results, please be sure you know the\n");
        printf("precision of your system timer.\n");
        printf(HLINE);
    }
#endif /* TIME_CHECK */

    /* --- MAIN LOOP --- repeat test cases NTIMES times --- */
    /* CLOCKS_EMULATE/TRACE_*/ /*STATS_* are LIME instrumentation macros from
       lime.h — presumably bracketing the measured region; confirm there. */
    CLOCKS_EMULATE
    // CACHE_BARRIER(NULL)
    TRACE_START
    STATS_START

    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
    {
        /* Copy: c = a */
        times[0][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Copy();
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j];
#endif
        times[0][k] = mysecond() - times[0][k];

        /* Scale: b = scalar*c */
        times[1][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Scale(scalar);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            b[j] = scalar*c[j];
#endif
        times[1][k] = mysecond() - times[1][k];

        /* Add: c = a+b */
        times[2][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Add();
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            c[j] = a[j]+b[j];
#endif
        times[2][k] = mysecond() - times[2][k];

        /* Triad: a = b+scalar*c */
        times[3][k] = mysecond();
#ifdef TUNED
        tuned_STREAM_Triad(scalar);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (j=0; j<STREAM_ARRAY_SIZE; j++)
            a[j] = b[j]+scalar*c[j];
#endif
        times[3][k] = mysecond() - times[3][k];
    }

    // CACHE_BARRIER(NULL)
    STATS_STOP
    TRACE_STOP
    CLOCKS_NORMAL

    /* --- SUMMARY --- */
    for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
    {
        for (j=0; j<4; j++)
        {
            avgtime[j] = avgtime[j] + times[j][k];
            mintime[j] = MIN(mintime[j], times[j][k]);
            maxtime[j] = MAX(maxtime[j], times[j][k]);
        }
    }

    printf("Function Best Rate MB/s Avg time Min time Max time\n");
    for (j=0; j<4; j++) {
        avgtime[j] = avgtime[j]/(double)(NTIMES-1);
        /* Best rate uses the minimum time over the NTIMES-1 counted runs. */
        printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j],
            1.0E-06 * bytes[j]/mintime[j],
            avgtime[j],
            mintime[j],
            maxtime[j]);
    }
    printf(HLINE);

    /* --- Check Results --- */
    checkSTREAMresults();
    printf(HLINE);

    STATS_PRINT

#ifdef DYN
    NFREE(a);
    NFREE(b);
    NFREE(c);
#endif

    TRACE_CAP
    return 0;
}

#define M 20

/* Estimate the timer granularity in microseconds: sample M distinct clock
   values and return the minimum positive difference between them. */
int checktick()
{
    int i, minDelta, Delta;
    double t1, t2, timesfound[M];

    /* Collect a sequence of M unique time values from the system. */
    for (i = 0; i < M; i++) {
        t1 = mysecond();
        while( ((t2=mysecond()) - t1) < 1.0E-6 )
            ;
        timesfound[i] = t1 = t2;
    }

    /*
     * Determine the minimum difference between these M values.
     * This result will be our estimate (in microseconds) for the
     * clock granularity.
     */
    minDelta = 1000000;
    for (i = 1; i < M; i++) {
        Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
        minDelta = MIN(minDelta, MAX(Delta,0));
    }

    return(minDelta);
}

/* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */
/* Here the gettimeofday path is compiled out; timing comes from LIME's
   tick_t/tget/tvesec/tval — presumably a cycle counter; confirm in lime.h. */
double mysecond()
{
#if 0
    struct timeval tp;
    int i;
    i = gettimeofday(&tp,NULL);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
#else
    tick_t t;
    tget(t);
    return(tvesec(tval(t)));
#endif
}

#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/* Recompute the expected final values of a[], b[], c[] with scalar
   arithmetic, compare the average absolute error per array against a
   type-dependent epsilon, and report any failures. */
void checkSTREAMresults()
{
    STREAM_TYPE aj,bj,cj,scalar;
    STREAM_TYPE aSumErr,bSumErr,cSumErr;
    STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
    double epsilon;
    ssize_t j;
    int k,ierr,err;

    /* reproduce initialization */
    aj = 1.0;
    bj = 2.0;
    cj = 0.0;
#ifdef TIME_CHECK
    /* a[] is modified during timing check */
    aj = 2.0E0 * aj;
#endif
    /* now execute timing loop */
    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
    {
        cj = aj;
        bj = scalar*cj;
        cj = aj+bj;
        aj = bj+scalar*cj;
    }

    /* accumulate deltas between observed and expected results */
    aSumErr = 0.0;
    bSumErr = 0.0;
    cSumErr = 0.0;
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        aSumErr += abs(a[j] - aj);
        bSumErr += abs(b[j] - bj);
        cSumErr += abs(c[j] - cj);
        // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
    }
    aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

    if (sizeof(STREAM_TYPE) == 4) {
        epsilon = 1.e-6;
    }
    else if (sizeof(STREAM_TYPE) == 8) {
        epsilon = 1.e-13;
    }
    else {
        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",(unsigned long)sizeof(STREAM_TYPE));
        epsilon = 1.e-6;
    }

    err = 0;
    if (abs(aAvgErr/aj) > epsilon) {
        err++;
        printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(a[j]/aj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,aj,a[j],abs((aj-a[j])/aAvgErr));
                }
#endif
            }
        }
        printf(" For array a[], %d errors were found.\n",ierr);
    }
    if (abs(bAvgErr/bj) > epsilon) {
        err++;
        printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(b[j]/bj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,bj,b[j],abs((bj-b[j])/bAvgErr));
                }
#endif
            }
        }
        printf(" For array b[], %d errors were found.\n",ierr);
    }
    if (abs(cAvgErr/cj) > epsilon) {
        err++;
        printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(c[j]/cj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n",
                        j,cj,c[j],abs((cj-c[j])/cAvgErr));
                }
#endif
            }
        }
        printf(" For array c[], %d errors were found.\n",ierr);
    }
    if (err == 0) {
        printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
    }
#ifdef VERBOSE
    printf ("Results Validation Verbose Results: \n");
    printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
    printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
    printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
void tuned_STREAM_Copy()
{
    ssize_t j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        c[j] = a[j];
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
    ssize_t j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        b[j] = scalar*c[j];
}

void tuned_STREAM_Add()
{
    ssize_t j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        c[j] = a[j]+b[j];
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
    ssize_t j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
GB_unaryop__minv_uint8_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint8_int64 // op(A') function: GB_tran__minv_uint8_int64 // C type: uint8_t // A type: int64_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 8) #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 8) ; // casting #define GB_CASTING(z, x) \ uint8_t z = (uint8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint8_int64 ( uint8_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p 
< anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint8_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
a4.c
#include "omp.h" void axpy(int N, float *Y, float *X, float a) { int i; #pragma omp target exit data map(from:X[0:N]) #pragma omp parallel for for (i = 0; i < N; ++i) Y[i] += a * X[i]; }
DRB081-func-arg-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A function argument passed by value should be private inside the function. Variable i is read only. */ #include "omprace.h" #include <omp.h> #include<stdio.h> #include<assert.h> /* argument pass-by-value */ void f1(int q) { q += 1; } int main() { omprace_init(); int i=0; #pragma omp parallel { f1(i); } assert (i==0); printf ("i=%d\n",i); omprace_fini(); return 0; }
DRB005-indirectaccess1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* This program is extracted from a real application at LLNL. Two pointers (xa1 and xa2) have a pair of values with a distance of 12. They are used as start base addresses for two 1-D arrays. Their index set has two indices with distance of 12: 999 +12 = 1011. So there is loop carried dependence. However, having loop carried dependence does not mean data races will always happen. The iterations with loop carried dependence must be scheduled to different threads in order for data races to happen. In this example, we use schedule(static,1) to increase the chance that the dependent loop iterations will be scheduled to different threads. Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5 */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #define N 180 int indexSet[N] = { 521, 523, 525, 527, 529, 531, 547, 549, 551, 553, 555, 557, 573, 575, 577, 579, 581, 583, 599, 601, 603, 605, 607, 609, 625, 627, 629, 631, 633, 635, 651, 653, 655, 657, 659, 661, 859, 861, 863, 865, 867, 869, 885, 887, 889, 891, 893, 895, 911, 913, 915, 917, 919, 923, // change original 921 to 923 = 911+12 937, 939, 941, 943, 945, 947, 963, 965, 967, 969, 971, 973, 989, 991, 993, 995, 997, 999, 1197, 1199, 1201, 1203, 1205, 1207, 1223, 1225, 1227, 1229, 1231, 1233, 1249, 1251, 1253, 1255, 1257, 1259, 1275, 1277, 1279, 1281, 1283, 1285, 1301, 1303, 1305, 1307, 1309, 1311, 1327, 1329, 1331, 1333, 1335, 1337, 1535, 1537, 1539, 1541, 1543, 1545, 1561, 1563, 1565, 1567, 1569, 1571, 1587, 1589, 1591, 1593, 1595, 1597, 1613, 1615, 1617, 1619, 1621, 1623, 1639, 1641, 1643, 1645, 1647, 1649, 1665, 1667, 1669, 1671, 1673, 1675, 1873, 1875, 1877, 1879, 1881, 1883, 1899, 1901, 1903, 1905, 1907, 1909, 1925, 1927, 1929, 1931, 1933, 1935, 1951, 1953, 1955, 1957, 1959, 1961, 1977, 1979, 1981, 1983, 1985, 1987, 2003, 2005, 2007, 2009, 2011, 2013}; int main (int argc, char* argv[]) { // max index value is 2013. 
+1 to ensure a reference like base[2015] // Pointers will never access the same offset as (xa2 = base + 2014). double * base = (double*) malloc(sizeof(double)* (2013+1+2013+1)); if (base == 0) { printf ("Error in malloc(). Aborting ...\n"); return 1; } double * xa1 = base; double * xa2 = xa1 + 2014; int i; // initialize segments touched by indexSet #pragma omp parallel for simd for (i =521; i<= 2025; ++i) { base[i]=0.5*i; } // default static even scheduling may not trigger data race, using static,1 instead. #pragma omp parallel for simd schedule(static,1) for (i =0; i< N; ++i) { int idx = indexSet[i]; xa1[idx]+= 1.0 + i; xa2[idx]+= 3.0 + i; } printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]); free (base); return 0; }
task_reduction4.c
// RUN: %libomp-compile-and-run // XFAIL: icc // UNSUPPORTED: clang-4, clang-5, clang-6, clang-7, clang-8, clang-9, clang-10 // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 #include <stdio.h> #include <stdlib.h> int a = 0, b = 1; int main(int argc, char **argv) { #pragma omp parallel reduction(task, +:a) reduction(task, *:b) { #pragma omp single { int i; for (i = 1; i <= 5; ++i) { #pragma omp task in_reduction(+: a) in_reduction(*: b) { a += i; b *= i; } } } } if (a != 15) { fprintf(stderr, "error: a != 15. Instead a = %d\n", a); exit(EXIT_FAILURE); } if (b != 120) { fprintf(stderr, "error: b != 120. Instead b = %d\n", b); exit(EXIT_FAILURE); } return EXIT_SUCCESS; }
findpath.c
/* gcc -fopenmp -g3 -DTEST_FINDPATH findpath.c -o FINDpath -lRNA -lm -I../ -L./ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include "ViennaRNA/datastructures/basic.h" #include "ViennaRNA/model.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/fold.h" #include "ViennaRNA/cofold.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/utils/structures.h" #include "ViennaRNA/landscape/findpath.h" #ifdef _OPENMP #include <omp.h> #endif #define LOOP_EN /** * @brief */ typedef struct move { int i; /* i,j>0 insert; i,j<0 delete */ int j; int when; /* 0 if still available, else resulting distance from start */ int E; } move_t; /** * @brief */ typedef struct intermediate { short *pt; /**< @brief pair table */ int Sen; /**< @brief saddle energy so far */ int curr_en; /**< @brief current energy */ move_t *moves; /**< @brief remaining moves to target */ } intermediate_t; /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ PRIVATE int BP_dist; PRIVATE move_t *path = NULL; PRIVATE int path_fwd; /* 1: s1->s2, else s2 -> s1 */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; #ifdef _OPENMP /* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate */ #pragma omp threadprivate(BP_dist, path, path_fwd, backward_compat_compound) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE move_t * copy_moves(move_t *mvs); PRIVATE int compare_ptable(const void *A, const void *B); PRIVATE int compare_energy(const void *A, const void *B); PRIVATE int compare_moves_when(const void *A, const void *B); PRIVATE void free_intermediate(intermediate_t *i); #ifdef TEST_FINDPATH /* 
TEST_FINDPATH, COFOLD */ PRIVATE void usage(void); #endif PRIVATE int find_path_once(vrna_fold_compound_t *vc, const char *s1, const char *s2, int maxl, int maxE); PRIVATE int try_moves(vrna_fold_compound_t *vc, intermediate_t c, int maxE, intermediate_t *next, int dist); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC void free_path(vrna_path_t *path) { vrna_path_t *tmp = path; if (tmp) { while (tmp->s) { free(tmp->s); tmp++; } free(path); } } PUBLIC int find_saddle(const char *seq, const char *s1, const char *s2, int width) { int maxE; char *sequence; vrna_fold_compound_t *vc; vrna_md_t md, *md_p; vc = NULL; set_model_details(&md); if (backward_compat_compound) { if (!strcmp(seq, backward_compat_compound->sequence)) { /* check if sequence is the same as before */ md.window_size = backward_compat_compound->length; md.max_bp_span = backward_compat_compound->length; md_p = &(backward_compat_compound->params->model_details); if (!memcmp(&md, md_p, sizeof(vrna_md_t))) /* check if model_details are the same as before */ vc = backward_compat_compound; /* re-use previous vrna_fold_compound_t */ } } if (!vc) { vrna_fold_compound_free(backward_compat_compound); sequence = vrna_cut_point_insert(seq, cut_point); backward_compat_compound = vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_EVAL_ONLY); free(sequence); } maxE = vrna_path_findpath_saddle(vc, s1, s2, width); return maxE; } PUBLIC int vrna_path_findpath_saddle(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width) { return vrna_path_findpath_saddle_ub(vc, s1, s2, width, INT_MAX - 1); } PUBLIC int vrna_path_findpath_saddle_ub(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width, int maxE) { int maxl; const char *tmp; move_t *bestpath = NULL; int dir; path_fwd = dir = 0; maxl = 1; do { int saddleE; path_fwd = !path_fwd; if (maxl > width) maxl = width; if (path) free(path); saddleE = find_path_once(vc, s1, s2, maxl, maxE); 
if (saddleE < maxE) { maxE = saddleE; if (bestpath) free(bestpath); bestpath = path; path = NULL; dir = path_fwd; } else { free(path); path = NULL; } tmp = s1; s1 = s2; s2 = tmp; maxl *= 2; } while (maxl < 2 * width); /* (re)set some globals */ path = bestpath; path_fwd = dir; return maxE; } PUBLIC vrna_path_t * get_path(const char *seq, const char *s1, const char *s2, int maxkeep) { vrna_path_t *route = NULL; char *sequence = NULL; vrna_fold_compound_t *vc = NULL; vrna_md_t md, *md_p; set_model_details(&md); if (backward_compat_compound) { if (!strcmp(seq, backward_compat_compound->sequence)) { /* check if sequence is the same as before */ md.window_size = backward_compat_compound->length; md.max_bp_span = backward_compat_compound->length; md_p = &(backward_compat_compound->params->model_details); if (!memcmp(&md, md_p, sizeof(vrna_md_t))) /* check if model_details are the same as before */ vc = backward_compat_compound; /* re-use previous vrna_fold_compound_t */ } } if (!vc) { vrna_fold_compound_free(backward_compat_compound); sequence = vrna_cut_point_insert(seq, cut_point); backward_compat_compound = vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_EVAL_ONLY); free(sequence); } route = vrna_path_findpath(vc, s1, s2, maxkeep); return route; } PUBLIC vrna_path_t * vrna_path_findpath(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width) { return vrna_path_findpath_ub(vc, s1, s2, width, INT_MAX - 1); } PUBLIC vrna_path_t * vrna_path_findpath_ub(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width, int maxE) { int E, d; vrna_path_t *route = NULL; E = vrna_path_findpath_saddle_ub(vc, s1, s2, width, maxE); /* did we find a better path than one with saddle maxE? 
*/ if (E < maxE) { route = (vrna_path_t *)vrna_alloc((BP_dist + 2) * sizeof(vrna_path_t)); qsort(path, BP_dist, sizeof(move_t), compare_moves_when); if (path_fwd) { /* memorize start of path */ route[0].s = strdup(s1); route[0].en = vrna_eval_structure(vc, s1); for (d = 0; d < BP_dist; d++) { int i, j; route[d + 1].s = strdup(route[d].s); i = path[d].i; j = path[d].j; if (i < 0) { /* delete */ route[d + 1].s[(-i) - 1] = route[d + 1].s[(-j) - 1] = '.'; } else { route[d + 1].s[i - 1] = '('; route[d + 1].s[j - 1] = ')'; } route[d + 1].en = path[d].E / 100.0; } } else { /* memorize start of path */ route[BP_dist].s = strdup(s2); route[BP_dist].en = vrna_eval_structure(vc, s2); for (d = 0; d < BP_dist; d++) { int i, j; route[BP_dist - d - 1].s = strdup(route[BP_dist - d].s); i = path[d].i; j = path[d].j; if (i < 0) { /* delete */ route[BP_dist - d - 1].s[(-i) - 1] = route[BP_dist - d - 1].s[(-j) - 1] = '.'; } else { route[BP_dist - d - 1].s[i - 1] = '('; route[BP_dist - d - 1].s[j - 1] = ')'; } route[BP_dist - d - 1].en = path[d].E / 100.0; } } #if _DEBUG_FINDPATH_ fprintf(stderr, "\n%s\n%s\n%s\n\n", seq, s1, s2); for (d = 0; d <= BP_dist; d++) fprintf(stderr, "%s %6.2f\n", route[d].s, route[d].en); fprintf(stderr, "%d\n", *num_entry); #endif } free(path); path = NULL; return route; } PRIVATE int try_moves(vrna_fold_compound_t *vc, intermediate_t c, int maxE, intermediate_t *next, int dist) { int *loopidx, len, num_next = 0, en, oldE; move_t *mv; short *pt; len = c.pt[0]; loopidx = vrna_loopidx_from_ptable(c.pt); oldE = c.Sen; for (mv = c.moves; mv->i != 0; mv++) { int i, j; if (mv->when > 0) continue; i = mv->i; j = mv->j; pt = (short *)vrna_alloc(sizeof(short) * (len + 1)); memcpy(pt, c.pt, (len + 1) * sizeof(short)); if (j < 0) { /*it's a delete move */ pt[-i] = 0; pt[-j] = 0; } else { /* insert move */ if ((loopidx[i] == loopidx[j]) && /* i and j belong to same loop */ (pt[i] == 0) && (pt[j] == 0) /* ... 
and are unpaired */ ) { pt[i] = j; pt[j] = i; } else { free(pt); continue; /* llegal move, try next; */ } } #ifdef LOOP_EN en = c.curr_en + vrna_eval_move_pt(vc, c.pt, i, j); #else en = vrna_eval_structure_pt(vc, pt); #endif if (en < maxE) { next[num_next].Sen = (en > oldE) ? en : oldE; next[num_next].curr_en = en; next[num_next].pt = pt; mv->when = dist; mv->E = en; next[num_next++].moves = copy_moves(c.moves); mv->when = 0; } else { free(pt); } } free(loopidx); return num_next; } PRIVATE int find_path_once(vrna_fold_compound_t *vc, const char *s1, const char *s2, int maxl, int maxE) { short *pt1, *pt2; move_t *mlist; int i, len, d, dist = 0, result; intermediate_t *current, *next; pt1 = vrna_ptable(s1); pt2 = vrna_ptable(s2); len = (int)strlen(s1); mlist = (move_t *)vrna_alloc(sizeof(move_t) * len); /* bp_dist < n */ for (i = 1; i <= len; i++) { if (pt1[i] != pt2[i]) { if (i < pt1[i]) { /* need to delete this pair */ mlist[dist].i = -i; mlist[dist].j = -pt1[i]; mlist[dist++].when = 0; } if (i < pt2[i]) { /* need to insert this pair */ mlist[dist].i = i; mlist[dist].j = pt2[i]; mlist[dist++].when = 0; } } } free(pt2); BP_dist = dist; current = (intermediate_t *)vrna_alloc(sizeof(intermediate_t) * (maxl + 1)); current[0].pt = pt1; current[0].Sen = current[0].curr_en = vrna_eval_structure_pt(vc, pt1); current[0].moves = mlist; next = (intermediate_t *)vrna_alloc(sizeof(intermediate_t) * (dist * maxl + 1)); for (d = 1; d <= dist; d++) { /* go through the distance classes */ int c, u, num_next = 0; intermediate_t *cc; for (c = 0; current[c].pt != NULL; c++) num_next += try_moves(vc, current[c], maxE, next + num_next, d); if (num_next == 0) { for (cc = current; cc->pt != NULL; cc++) free_intermediate(cc); current[0].Sen = INT_MAX; break; } /* remove duplicates via sort|uniq * if this becomes a bottleneck we can use a hash instead */ qsort(next, num_next, sizeof(intermediate_t), compare_ptable); for (u = 0, c = 1; c < num_next; c++) { if (memcmp(next[u].pt, next[c].pt, 
sizeof(short) * len) != 0) next[++u] = next[c]; else free_intermediate(next + c); } num_next = u + 1; qsort(next, num_next, sizeof(intermediate_t), compare_energy); /* free the old stuff */ for (cc = current; cc->pt != NULL; cc++) free_intermediate(cc); for (u = 0; u < maxl && u < num_next; u++) current[u] = next[u]; for (; u < num_next; u++) free_intermediate(next + u); num_next = 0; } free(next); path = current[0].moves; result = current[0].Sen; free(current[0].pt); free(current); return result; } PRIVATE void free_intermediate(intermediate_t *i) { free(i->pt); free(i->moves); i->pt = NULL; i->moves = NULL; i->Sen = INT_MAX; } PRIVATE int compare_ptable(const void *A, const void *B) { intermediate_t *a, *b; int c; a = (intermediate_t *)A; b = (intermediate_t *)B; c = memcmp(a->pt, b->pt, a->pt[0] * sizeof(short)); if (c != 0) return c; if ((a->Sen - b->Sen) != 0) return a->Sen - b->Sen; return a->curr_en - b->curr_en; } PRIVATE int compare_energy(const void *A, const void *B) { intermediate_t *a, *b; a = (intermediate_t *)A; b = (intermediate_t *)B; if ((a->Sen - b->Sen) != 0) return a->Sen - b->Sen; return a->curr_en - b->curr_en; } PRIVATE int compare_moves_when(const void *A, const void *B) { move_t *a, *b; a = (move_t *)A; b = (move_t *)B; return a->when - b->when; } PRIVATE move_t * copy_moves(move_t *mvs) { move_t *new; new = (move_t *)vrna_alloc(sizeof(move_t) * (BP_dist + 1)); memcpy(new, mvs, sizeof(move_t) * (BP_dist + 1)); return new; } #ifdef TEST_FINDPATH PUBLIC void print_path(const char *seq, const char *struc) { int d; char *s; s = strdup(struc); if (cut_point == -1) { printf("%s\n%s\n", seq, s); } /* printf("%s\n%s %6.2f\n", seq, s, vrna_eval_structure_simple(seq,s)); */ else { char *pstruct, *pseq; pstruct = vrna_cut_point_insert(s, cut_point); pseq = vrna_cut_point_insert(seq, cut_point); printf("%s\n%s\n", pseq, pstruct); /* printf("%s\n%s %6.2f\n", pseq, pstruct, vrna_eval_structure_simple(seq,s)); */ free(pstruct); free(pseq); } qsort(path, 
BP_dist, sizeof(move_t), compare_moves_when); for (d = 0; d < BP_dist; d++) { int i, j; i = path[d].i; j = path[d].j; if (i < 0) { /* delete */ s[(-i) - 1] = s[(-j) - 1] = '.'; } else { s[i - 1] = '('; s[j - 1] = ')'; } /* printf("%s %6.2f - %6.2f\n", s, vrna_eval_structure_simple(seq,s), path[d].E/100.0); */ } free(s); } int main(int argc, char *argv[]) { char *line, *seq, *s1, *s2; int E, maxkeep = 1000; int verbose = 0, i; vrna_path_t *route, *r; for (i = 1; i < argc; i++) { switch (argv[i][1]) { case 'm': if (strcmp(argv[i], "-m") == 0) sscanf(argv[++i], "%d", &maxkeep); break; case 'v': verbose = !strcmp(argv[i], "-v"); break; case 'd': if (strcmp(argv[i], "-d") == 0) sscanf(argv[++i], "%d", &dangles); break; default: usage(); } } cut_point = -1; line = vrna_read_line(stdin); seq = vrna_cut_point_remove(line, &cut_point); free(line); line = vrna_read_line(stdin); s1 = vrna_cut_point_remove(line, &cut_point); free(line); line = vrna_read_line(stdin); s2 = vrna_cut_point_remove(line, &cut_point); free(line); E = find_saddle(seq, s1, s2, maxkeep); printf("saddle_energy = %6.2f\n", E / 100.); if (verbose) { if (path_fwd) print_path(seq, s1); else print_path(seq, s2); free(path); path = NULL; route = get_path(seq, s1, s2, maxkeep); for (r = route; r->s; r++) { if (cut_point == -1) { printf("%s %6.2f\n", r->s, r->en); /* printf("%s %6.2f - %6.2f\n", r->s, vrna_eval_structure_simple(seq,r->s), r->en); */ } else { char *pstruct; pstruct = vrna_cut_point_insert(r->s, cut_point); printf("%s %6.2f\n", pstruct, r->en); /* printf("%s %6.2f - %6.2f\n", pstruct, vrna_eval_structure_simple(seq,r->s), r->en); */ free(pstruct); } free(r->s); } free(route); } free(seq); free(s1); free(s2); return EXIT_SUCCESS; } static void usage(void) { vrna_message_error("usage: findpath.c [-m depth] [-d[0|1|2]] [-v]"); } #endif
matmul-decompose.c
/* * Square matrix multiplication * A[N][N] * B[N][N] = C[N][N] * */ #include <stdio.h> #include <stdlib.h> #include <sys/timeb.h> #include <omp.h> /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } /* read timer in ms */ double read_timer_ms() { struct timeb tm; ftime(&tm); return (double) tm.time * 1000.0 + (double) tm.millitm; } #define REAL float void init(int N, REAL A[][N]) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[i][j] = (REAL) drand48(); } } } double maxerror(int N, REAL A[][N], REAL B[][N]) { int i, j; double error = 0.0; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { double diff = (A[i][j] - B[i][j]) / A[i][j]; if (diff < 0) diff = -diff; if (diff > error) error = diff; } } return error; } void matmul_base(int N, REAL A[][N], REAL B[][N], REAL C[][N]); void matmul_base_sub(int i_start, int j_start, int Mt, int Nt, int N, REAL A[][N], REAL B[][N], REAL C[][N]); void matmul_row1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks); void matmul_column1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks); void matmul_rowcol2D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks_row, int num_tasks_col); int main(int argc, char *argv[]) { int N; int num_tasks = 4; /* 4 is default number of tasks */ double elapsed_base, elapsed_row1D_dist, elapsed_column1D_dist, elapsed_rowcol2D_dist; /* for timing */ if (argc < 2) { fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks); exit(1); } N = atoi(argv[1]); if (argc > 2) num_tasks = atoi(argv[2]); REAL A[N][N]; REAL B[N][N]; REAL C_base[N][N]; REAL C_row1D_dist[N][N]; REAL C_column1D_dist[N][N]; REAL C_rowcol2D_dist[N][N]; srand48(1 << 12); init(N, A); init(N, B); /* example run */ elapsed_base = read_timer(); matmul_base(N, A, B, C_base); elapsed_base = (read_timer() - elapsed_base); elapsed_row1D_dist = read_timer(); matmul_row1D_dist(N, A, B, 
C_row1D_dist, num_tasks); elapsed_row1D_dist = (read_timer() - elapsed_row1D_dist); elapsed_column1D_dist = read_timer(); matmul_column1D_dist(N, A, B, C_column1D_dist, num_tasks); elapsed_column1D_dist = (read_timer() - elapsed_column1D_dist); elapsed_rowcol2D_dist = read_timer(); matmul_rowcol2D_dist(N, A, B, C_rowcol2D_dist, num_tasks, num_tasks); elapsed_rowcol2D_dist = (read_timer() - elapsed_rowcol2D_dist); /* you should add the call to each function and time the execution */ printf("======================================================================================================\n"); printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N); printf("------------------------------------------------------------------------------------------------------\n"); printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n"); printf("------------------------------------------------------------------------------------------------------\n"); printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, C_base, C_base)); printf("matmul_row1D_dist:\t%4f\t%4f \t\t%g\n", elapsed_row1D_dist * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_row1D_dist)), maxerror(N, C_base, C_row1D_dist)); printf("matmul_column1D_dist:\t%4f\t%4f \t\t%g\n", elapsed_column1D_dist * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_column1D_dist)), maxerror(N, C_base, C_column1D_dist)); printf("matmul_rowcol2D_dist:\t%4f\t%4f \t\t%g\n", elapsed_rowcol2D_dist * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_rowcol2D_dist)), maxerror(N, C_base, C_rowcol2D_dist)); return 0; } void matmul_base(int N, REAL A[][N], REAL B[][N], REAL C[][N]) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i][j] = 0; for (k = 0; k < N; k++) C[i][j] += A[i][k] * B[k][j]; } } } /* compute submatrix multiplication, A[start:length] notation * A[i_start:Mt][N] x B[N][j_start:Nt] = 
C[i_start:Mt][j_start:Nt] */ void matmul_base_sub(int i_start, int j_start, int Mt, int Nt, int N, REAL A[][N], REAL B[][N], REAL C[][N]) { int i, j, k; for (i = i_start; i < Mt + i_start; i++) { for (j = j_start; j < Nt + j_start; j++) { C[i][j] = 0; for (k = 0; k < N; k++) C[i][j] += A[i][k] * B[k][j]; } } } /* this is a sequential verion showing the decomposition */ void matmul_row1D_dist_seq(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks) { int tid; for (tid = 0; tid < num_tasks; tid++) { int i_start, j_start; int Mt, Nt; Mt = N / num_tasks; Nt = N; i_start = tid * Mt; j_start = 0; matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C); } } void matmul_row1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks) { #pragma omp parallel num_threads(num_tasks) { int tid = omp_get_thread_num(); int i_start, j_start; int Mt, Nt; Mt = N / num_tasks; Nt = N; i_start = tid * Mt; j_start = 0; matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C); } } void matmul_column1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks) { #pragma omp parallel num_threads(num_tasks) { int tid = omp_get_thread_num(); int i_start, j_start; int Mt, Nt; Mt = N; Nt = N / num_tasks; i_start = 0; j_start = tid * Nt; matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C); } } void matmul_rowcol2D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks_row, int num_tasks_col) { #pragma omp parallel num_threads(num_tasks_row + num_tasks_col) { int tid = omp_get_thread_num(); int i_start, j_start; int Mt, Nt; Mt = N / num_tasks_row; Nt = N / num_tasks_col; i_start = tid * Mt; j_start = tid * Nt; matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C); } }
GB_unaryop__abs_uint8_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint8_bool
// op(A') function: GB_tran__abs_uint8_bool

// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij

// NOTE: ABS applied to a value cast from bool (0 or 1) is the identity, so
// the "unaryop" above reduces to a plain copy of the casted value.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the anz entries of Ax, writing into Cx.
// Parallelized with a static OpenMP schedule over nthreads threads; each
// entry is independent, so no synchronization is needed.
GrB_Info GB_unop__abs_uint8_bool
(
    uint8_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the shared template
// GB_unaryop_transpose.c, which is specialized here via the GB_* macros
// defined above (phase 2 of 2 of the transpose algorithm).
GrB_Info GB_tran__abs_uint8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
canny_edge_openmp.c
/* gcc -o canny_edge canny_edge.c hysteresis.c pgm_io.c -lm (Note: You can also use optimization such as -O3) The resulting program, canny_edge, will process images in the PGM format. Parameter selection is left up to the user. A broad range of parameters to use as a starting point are: sigma 0.60-2.40, tlow 0.20-0.50 and, thigh 0.60-0.90. If you are using a Unix system, PGM file format conversion tools can be found at ftp://wuarchive.wustl.edu/graphics/graphics/packages/pbmplus/. Otherwise, it would be easy for anyone to rewrite the image I/O procedures because they are listed in the separate file pgm_io.c. If you want to check your compiled code, you can download grey-scale and edge images from http://marathon.csee.usf.edu/edge/edge_detection.html. You can use the parameters given in the edge filenames and check whether the edges that are output from your program match the edge images posted at that address. */ /******************************************************************************* * -------------------------------------------- *(c) 2001 University of South Florida, Tampa * Use, or copying without permission prohibited. * PERMISSION TO USE * In transmitting this software, permission to use for research and * educational purposes is hereby granted. This software may be copied for * archival and backup purposes only. This software may not be transmitted * to a third party without prior permission of the copyright holder. This * permission may be granted only by Mike Heath or Prof. Sudeep Sarkar of * University of South Florida (sarkar@csee.usf.edu). Acknowledgment as * appropriate is respectfully requested. * * Heath, M., Sarkar, S., Sanocki, T., and Bowyer, K. Comparison of edge * detectors: a methodology and initial study, Computer Vision and Image * Understanding 69 (1), 38-54, January 1998. * Heath, M., Sarkar, S., Sanocki, T. and Bowyer, K.W. 
A Robust Visual
* Method for Assessing the Relative Performance of Edge Detection
* Algorithms, IEEE Transactions on Pattern Analysis and Machine
* Intelligence 19 (12), 1338-1359, December 1997.
* ------------------------------------------------------
*
* PROGRAM: canny_edge
* PURPOSE: This program implements a "Canny" edge detector. The processing
* steps are as follows:
*
* 1) Convolve the image with a separable gaussian filter.
* 2) Take the dx and dy first derivatives using [-1,0,1] and [1,0,-1]'.
* 3) Compute the magnitude: sqrt(dx*dx+dy*dy).
* 4) Perform non-maximal suppression.
* 5) Perform hysteresis.
*
* The user must input three parameters. These are as follows:
*
* sigma = The standard deviation of the gaussian smoothing filter.
* tlow = Specifies the low value to use in hysteresis. This is a
* fraction (0-1) of the computed high threshold edge strength value.
* thigh = Specifies the high value to use in hysteresis. This fraction (0-1)
* specifies the percentage point in a histogram of the gradient of
* the magnitude. Magnitude values of zero are not counted in the
* histogram.
*
* NAME: Mike Heath
* Computer Vision Laboratory
* University of South Florida
* heath@csee.usf.edu
*
* DATE: 2/15/96
*
* Modified: 5/17/96 - To write out a floating point RAW headerless file of
* the edge gradient "up the edge" where the angle is
* defined in radians counterclockwise from the x direction.
* (Mike Heath)
*******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "hysteresis.c"
#include "pgm_io.c"

#define VERBOSE 0
#define BOOSTBLURFACTOR 90.0

int read_pgm_image(char *infilename, unsigned char **image, int *rows,
    int *cols);
int write_pgm_image(char *outfilename, unsigned char *image, int rows,
    int cols, char *comment, int maxval);
void canny(unsigned char *image, int rows, int cols, float sigma,
         float tlow, float thigh, unsigned char **edge, char *fname);
void gaussian_smooth(unsigned char *image, int rows, int cols, float sigma,
        short int **smoothedim);
void make_gaussian_kernel(float sigma, float **kernel, int *windowsize);
void derrivative_x_y(short int *smoothedim, int rows, int cols,
        short int **delta_x, short int **delta_y);
void magnitude_x_y(short int *delta_x, short int *delta_y, int rows, int cols,
        short int **magnitude);
void apply_hysteresis(short int *mag, unsigned char *nms, int rows, int cols,
        float tlow, float thigh, unsigned char *edge);
void radian_direction(short int *delta_x, short int *delta_y, int rows,
    int cols, float **dir_radians, int xdirtag, int ydirtag);
double angle_radians(double x, double y);

/* Driver: parses the command line, reads a PGM image, runs the Canny
 * pipeline, and writes the edge image (plus an optional gradient-direction
 * image) back to disk. */
int main(int argc, char *argv[])
{
   char *infilename = NULL;  /* Name of the input image */
   char *dirfilename = NULL; /* Name of the output gradient direction image */
   char outfilename[128];    /* Name of the output "edge" image */
   char composedfname[128];  /* Name of the output "direction" image */
   unsigned char *image;     /* The input image */
   unsigned char *edge;      /* The output edge image */
   int rows, cols;           /* The dimensions of the image. */
   float sigma,              /* Standard deviation of the gaussian kernel. */
         tlow,               /* Fraction of the high threshold in hysteresis. */
         thigh;              /* High hysteresis threshold control. The actual
                                threshold is the (100 * thigh) percentage point
                                in the histogram of the magnitude of the
                                gradient image that passes non-maximal
                                suppression. */

   /****************************************************************************
   * Get the command line arguments.
   ****************************************************************************/
   if(argc < 5){
   fprintf(stderr,"\n<USAGE> %s image sigma tlow thigh [writedirim]\n",argv[0]);
      fprintf(stderr,"\n image: An image to process. Must be in ");
      fprintf(stderr,"PGM format.\n");
      fprintf(stderr," sigma: Standard deviation of the gaussian");
      fprintf(stderr," blur kernel.\n");
      fprintf(stderr," tlow: Fraction (0.0-1.0) of the high ");
      fprintf(stderr,"edge strength threshold.\n");
      fprintf(stderr," thigh: Fraction (0.0-1.0) of the distribution");
      fprintf(stderr," of non-zero edge\n strengths for ");
      fprintf(stderr,"hysteresis. The fraction is used to compute\n");
      fprintf(stderr," the high edge strength threshold.\n");
      fprintf(stderr," writedirim: Optional argument to output ");
      fprintf(stderr,"a floating point");
      fprintf(stderr," direction image.\n\n");
      exit(1);
   }

   infilename = argv[1];
   sigma = atof(argv[2]);
   tlow = atof(argv[3]);
   thigh = atof(argv[4]);

   /* Any fifth argument requests the direction image; the non-NULL value is
    * a placeholder that is replaced by the composed filename below. */
   if(argc == 6) dirfilename = infilename;
   else dirfilename = NULL;

   /****************************************************************************
   * Read in the image. This read function allocates memory for the image.
   ****************************************************************************/
   if(VERBOSE) printf("Reading the image %s.\n", infilename);
   if(read_pgm_image(infilename, &image, &rows, &cols) == 0){
      fprintf(stderr, "Error reading the input image, %s.\n", infilename);
      exit(1);
   }

   /****************************************************************************
   * Perform the edge detection. All of the work takes place here.
   ****************************************************************************/
   if(VERBOSE) printf("Starting Canny edge detection.\n");
   if(dirfilename != NULL){
      sprintf(composedfname, "%s_s_%3.2f_l_%3.2f_h_%3.2f.fim", infilename,
      sigma, tlow, thigh);
      dirfilename = composedfname;
   }
   canny(image, rows, cols, sigma, tlow, thigh, &edge, dirfilename);

   /****************************************************************************
   * Write out the edge image to a file.
   ****************************************************************************/
   sprintf(outfilename, "%s_s_%3.2f_l_%3.2f_h_%3.2f.pgm", infilename,
      sigma, tlow, thigh);
   if(VERBOSE) printf("Writing the edge iname in the file %s.\n", outfilename);
   if(write_pgm_image(outfilename, edge, rows, cols, "", 255) == 0){
      fprintf(stderr, "Error writing the edge image, %s.\n", outfilename);
      exit(1);
   }

   return 0;
}

/*******************************************************************************
* PROCEDURE: canny
* PURPOSE: To perform canny edge detection.
* NAME: Mike Heath
* DATE: 2/15/96
*******************************************************************************/
void canny(unsigned char *image, int rows, int cols, float sigma,
         float tlow, float thigh, unsigned char **edge, char *fname)
{
   FILE *fpdir=NULL;          /* File to write the gradient image to.     */
   unsigned char *nms;        /* Points that are local maximal magnitude. */
   short int *smoothedim,     /* The image after gaussian smoothing.      */
             *delta_x,        /* The first devivative image, x-direction. */
             *delta_y,        /* The first derivative image, y-direction. */
             *magnitude;      /* The magnitude of the gadient image.      */
   int r, c, pos;
   float *dir_radians=NULL;   /* Gradient direction image.                */

   /****************************************************************************
   * Perform gaussian smoothing on the image using the input standard
   * deviation.
   ****************************************************************************/
   if(VERBOSE) printf("Smoothing the image using a gaussian kernel.\n");
   gaussian_smooth(image, rows, cols, sigma, &smoothedim);

   /****************************************************************************
   * Compute the first derivative in the x and y directions.
   ****************************************************************************/
   if(VERBOSE) printf("Computing the X and Y first derivatives.\n");
   derrivative_x_y(smoothedim, rows, cols, &delta_x, &delta_y);

   /****************************************************************************
   * This option to write out the direction of the edge gradient was added
   * to make the information available for computing an edge quality figure
   * of merit.
   ****************************************************************************/
   if(fname != NULL){
      /*************************************************************************
      * Compute the direction up the gradient, in radians that are
      * specified counterclockwise from the positive x-axis.
      *************************************************************************/
      radian_direction(delta_x, delta_y, rows, cols, &dir_radians, -1, -1);

      /*************************************************************************
      * Write the gradient direction image out to a file.
      *************************************************************************/
      if((fpdir = fopen(fname, "wb")) == NULL){
         fprintf(stderr, "Error opening the file %s for writing.\n", fname);
         exit(1);
      }
      fwrite(dir_radians, sizeof(float), rows*cols, fpdir);
      fclose(fpdir);
      free(dir_radians);
   }

   /****************************************************************************
   * Compute the magnitude of the gradient.
   ****************************************************************************/
   if(VERBOSE) printf("Computing the magnitude of the gradient.\n");
   magnitude_x_y(delta_x, delta_y, rows, cols, &magnitude);

   /****************************************************************************
   * Perform non-maximal suppression.
   ****************************************************************************/
   if(VERBOSE) printf("Doing the non-maximal suppression.\n");
   if((nms = (unsigned char *) calloc(rows*cols,sizeof(unsigned char)))==NULL){
      fprintf(stderr, "Error allocating the nms image.\n");
      exit(1);
   }

   non_max_supp(magnitude, delta_x, delta_y, rows, cols, nms);

   /****************************************************************************
   * Use hysteresis to mark the edge pixels.
   ****************************************************************************/
   if(VERBOSE) printf("Doing hysteresis thresholding.\n");
   if((*edge=(unsigned char *)calloc(rows*cols,sizeof(unsigned char))) ==NULL){
      fprintf(stderr, "Error allocating the edge image.\n");
      exit(1);
   }

   apply_hysteresis(magnitude, nms, rows, cols, tlow, thigh, *edge);

   /****************************************************************************
   * Free all of the memory that we allocated except for the edge image that
   * is still being used to store out result.
   ****************************************************************************/
   free(smoothedim);
   free(delta_x);
   free(delta_y);
   free(magnitude);
   free(nms);
}

/*******************************************************************************
* Procedure: radian_direction
* Purpose: To compute a direction of the gradient image from component dx and
* dy images. Because not all derivatives are computed in the same way, this
* code allows for dx or dy to have been calculated in different ways.
* * FOR X: xdirtag = -1 for [-1 0 1] * xdirtag = 1 for [ 1 0 -1] * * FOR Y: ydirtag = -1 for [-1 0 1]' * ydirtag = 1 for [ 1 0 -1]' * * The resulting angle is in radians measured counterclockwise from the * xdirection. The angle points "up the gradient". *******************************************************************************/ void radian_direction(short int *delta_x, short int *delta_y, int rows, int cols, float **dir_radians, int xdirtag, int ydirtag) { int r, c, pos; float *dirim=NULL; double dx, dy; /**************************************************************************** * Allocate an image to store the direction of the gradient. ****************************************************************************/ if((dirim = (float *) calloc(rows*cols, sizeof(float))) == NULL){ fprintf(stderr, "Error allocating the gradient direction image.\n"); exit(1); } *dir_radians = dirim; for(r=0,pos=0;r<rows;r++){ for(c=0;c<cols;c++,pos++){ dx = (double)delta_x[pos]; dy = (double)delta_y[pos]; if(xdirtag == 1) dx = -dx; if(ydirtag == -1) dy = -dy; dirim[pos] = (float)angle_radians(dx, dy); } } } /******************************************************************************* * FUNCTION: angle_radians * PURPOSE: This procedure computes the angle of a vector with components x and * y. It returns this angle in radians with the answer being in the range * 0 <= angle <2*PI. *******************************************************************************/ double angle_radians(double x, double y) { double xu, yu, ang; xu = fabs(x); yu = fabs(y); if((xu == 0) && (yu == 0)) return(0); ang = atan(yu/xu); if(x >= 0){ if(y >= 0) return(ang); else return(2*M_PI - ang); } else{ if(y >= 0) return(M_PI - ang); else return(M_PI + ang); } } /******************************************************************************* * PROCEDURE: magnitude_x_y * PURPOSE: Compute the magnitude of the gradient. This is the square root of * the sum of the squared derivative values. 
* NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void magnitude_x_y(short int *delta_x, short int *delta_y, int rows, int cols, short int **magnitude) { int r, c, pos, sq1, sq2; /**************************************************************************** * Allocate an image to store the magnitude of the gradient. ****************************************************************************/ if((*magnitude = (short *) calloc(rows*cols, sizeof(short))) == NULL){ fprintf(stderr, "Error allocating the magnitude image.\n"); exit(1); } for(r=0,pos=0;r<rows;r++){ for(c=0;c<cols;c++,pos++){ sq1 = (int)delta_x[pos] * (int)delta_x[pos]; sq2 = (int)delta_y[pos] * (int)delta_y[pos]; (*magnitude)[pos] = (short)(0.5 + sqrt((float)sq1 + (float)sq2)); } } } /******************************************************************************* * PROCEDURE: derrivative_x_y * PURPOSE: Compute the first derivative of the image in both the x any y * directions. The differential filters that are used are: * * -1 * dx = -1 0 +1 and dy = 0 * +1 * * NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void derrivative_x_y(short int *smoothedim, int rows, int cols, short int **delta_x, short int **delta_y) { int r, c, pos; /**************************************************************************** * Allocate images to store the derivatives. ****************************************************************************/ if(((*delta_x) = (short *) calloc(rows*cols, sizeof(short))) == NULL){ fprintf(stderr, "Error allocating the delta_x image.\n"); exit(1); } if(((*delta_y) = (short *) calloc(rows*cols, sizeof(short))) == NULL){ fprintf(stderr, "Error allocating the delta_x image.\n"); exit(1); } /**************************************************************************** * Compute the x-derivative. Adjust the derivative at the borders to avoid * losing pixels. 
****************************************************************************/ if(VERBOSE) printf(" Computing the X-direction derivative.\n"); for(r=0;r<rows;r++){ pos = r * cols; (*delta_x)[pos] = smoothedim[pos+1] - smoothedim[pos]; pos++; for(c=1;c<(cols-1);c++,pos++){ (*delta_x)[pos] = smoothedim[pos+1] - smoothedim[pos-1]; } (*delta_x)[pos] = smoothedim[pos] - smoothedim[pos-1]; } /**************************************************************************** * Compute the y-derivative. Adjust the derivative at the borders to avoid * losing pixels. ****************************************************************************/ if(VERBOSE) printf(" Computing the Y-direction derivative.\n"); for(c=0;c<cols;c++){ pos = c; (*delta_y)[pos] = smoothedim[pos+cols] - smoothedim[pos]; pos += cols; for(r=1;r<(rows-1);r++,pos+=cols){ (*delta_y)[pos] = smoothedim[pos+cols] - smoothedim[pos-cols]; } (*delta_y)[pos] = smoothedim[pos] - smoothedim[pos-cols]; } } /******************************************************************************* * PROCEDURE: gaussian_smooth * PURPOSE: Blur an image with a gaussian filter. * NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void gaussian_smooth(unsigned char *image, int rows, int cols, float sigma, short int **smoothedim) { int r, c, rr, cc, /* Counter variables. */ windowsize, /* Dimension of the gaussian kernel. */ center; /* Half of the windowsize. */ float *tempim, /* Buffer for separable filter gaussian smoothing. */ *kernel, /* A one dimensional gaussian kernel. */ dot, /* Dot product summing variable. */ sum; /* Sum of the kernel weights variable. */ /**************************************************************************** * Create a 1-dimensional gaussian smoothing kernel. 
****************************************************************************/ if(VERBOSE) printf(" Computing the gaussian smoothing kernel.\n"); make_gaussian_kernel(sigma, &kernel, &windowsize); center = windowsize / 2; /**************************************************************************** * Allocate a temporary buffer image and the smoothed image. ****************************************************************************/ if((tempim = (float *) calloc(rows*cols, sizeof(float))) == NULL){ fprintf(stderr, "Error allocating the buffer image.\n"); exit(1); } if(((*smoothedim) = (short int *) calloc(rows*cols, sizeof(short int))) == NULL){ fprintf(stderr, "Error allocating the smoothed image.\n"); exit(1); } /**************************************************************************** * Blur in the x - direction. ****************************************************************************/ if(VERBOSE) printf(" Bluring the image in the X-direction.\n"); #pragma omp parallel for private(cc) reduction(+: dot, sum) collapse(2) for(r=0;r<rows;r++){ for(c=0;c<cols;c++){ dot = 0.0; sum = 0.0; for(cc=(-center);cc<=center;cc++){ if(((c+cc) >= 0) && ((c+cc) < cols)){ dot += (float)image[r*cols+(c+cc)] * kernel[center+cc]; sum += kernel[center+cc]; } } tempim[r*cols+c] = dot/sum; } } /**************************************************************************** * Blur in the y - direction. 
****************************************************************************/ if(VERBOSE) printf(" Bluring the image in the Y-direction.\n"); #pragma omp parallel for private(rr) reduction(+: dot, sum) collapse(2) for(c=0;c<cols;c++){ for(r=0;r<rows;r++){ sum = 0.0; dot = 0.0; for(rr=(-center);rr<=center;rr++){ if(((r+rr) >= 0) && ((r+rr) < rows)){ dot += tempim[(r+rr)*cols+c] * kernel[center+rr]; sum += kernel[center+rr]; } } (*smoothedim)[r*cols+c] = (short int)(dot*BOOSTBLURFACTOR/sum + 0.5); } } free(tempim); free(kernel); } /******************************************************************************* * PROCEDURE: make_gaussian_kernel * PURPOSE: Create a one dimensional gaussian kernel. * NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void make_gaussian_kernel(float sigma, float **kernel, int *windowsize) { int i, center; float x, fx, sum=0.0; *windowsize = 1 + 2 * ceil(2.5 * sigma); center = (*windowsize) / 2; if(VERBOSE) printf(" The kernel has %d elements.\n", *windowsize); if((*kernel = (float *) calloc((*windowsize), sizeof(float))) == NULL){ fprintf(stderr, "Error callocing the gaussian kernel array.\n"); exit(1); } for(i=0;i<(*windowsize);i++){ x = (float)(i - center); fx = pow(2.71828, -0.5*x*x/(sigma*sigma)) / (sigma * sqrt(6.2831853)); (*kernel)[i] = fx; sum += fx; } for(i=0;i<(*windowsize);i++) (*kernel)[i] /= sum; if(VERBOSE){ printf("The filter coefficients are:\n"); for(i=0;i<(*windowsize);i++) printf("kernel[%d] = %f\n", i, (*kernel)[i]); } }
target_exit_data.c
#pragma omp target exit data [clauses]
XT_genSinogram.c
/* ============================================================================ * Copyright (c) 2013 K. Aditya Mohan (Purdue University) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * Neither the name of K. Aditya Mohan, Purdue * University, nor the names of its contributors may be used * to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /*#include <iostream>*/ /*#include "TiffUtilities.h"*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "XT_Structures.h" #include "XT_Constants.h" #include "allocate.h" #include <math.h> #include "XT_IOMisc.h" #include "XT_AMatrix.h" #include "XT_Profile.h" #include "randlib.h" #include "XT_Init.h" #include "XT_Debug.h" #include <fftw3.h> #include "XT_CmplxArith.h" #include "XT_MPIIO.h" #include "XT_DensityUpdate.h" #include "randlib.h" /*generates projection data from phantom*/ int32_t ForwardProject (Sinogram* SinoPtr, ScannedObject* ObjPtr, TomoInputs* InpPtr, FFTStruct* fftptr, float *data_unflip_x, float* data_unflip_y) { FILE *fp; long int stream_offset, size, result; int32_t i, j, k, m, n, idx, t, slice, data_idx; uint8_t AvgNumXElements, AvgNumYElements, AvgNumElements; char phantom_file[1000]; int dimTiff[4]; Real_t val, MagPhaseMultiple, detdist_r, sigpwr; Real_arr_t *objptr; MagPhaseMultiple = InpPtr->MagPhaseMultiple; Real_arr_t**** magobject = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3); Real_arr_t**** magpot = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3); memset(data_unflip_x, 0, SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(float)); memset(data_unflip_y, 0, SinoPtr->Ny_p*SinoPtr->N_t*SinoPtr->N_r*sizeof(float)); /* Real_arr_t*** realmagobject = (Real_arr_t****)multialloc(sizeof(Real_arr_t), 4, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3); Real_arr_t*** realelecobject = (Real_arr_t***)multialloc(sizeof(Real_arr_t), 3, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x); */ /*AvgNumXElements over estimates the total number of entries in a single column of A matrix when indexed by both voxel and angle*/ AvgNumXElements = (uint8_t)ceil(3*ObjPtr->delta_x/(SinoPtr->delta_r) + 2); AvgNumYElements = (uint8_t)ceil(3*ObjPtr->delta_y/(SinoPtr->delta_r) + 2); 
SinoPtr->DetectorResponse_x = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinoPtr->Nx_p, DETECTOR_RESPONSE_BINS+1); SinoPtr->DetectorResponse_y = (Real_arr_t **)multialloc(sizeof(Real_arr_t), 2, SinoPtr->Ny_p, DETECTOR_RESPONSE_BINS+1); SinoPtr->ZLineResponse = (Real_arr_t *)get_spc(DETECTOR_RESPONSE_BINS + 1, sizeof(Real_arr_t)); DetectorResponseProfile (SinoPtr, ObjPtr, InpPtr); ZLineResponseProfile (SinoPtr, ObjPtr, InpPtr); AvgNumElements = (uint8_t)((ObjPtr->delta_x/SinoPtr->delta_t) + 2); AMatrixCol* VoxelLineResp_X = (AMatrixCol*)get_spc(ObjPtr->N_x, sizeof(AMatrixCol)); for (t = 0; t < ObjPtr->N_x; t++){ VoxelLineResp_X[t].values = (Real_t*)get_spc(AvgNumElements, sizeof(Real_t)); VoxelLineResp_X[t].index = (int32_t*)get_spc(AvgNumElements, sizeof(int32_t)); } storeVoxelLineResponse(VoxelLineResp_X, SinoPtr, ObjPtr->x0, ObjPtr->delta_x, ObjPtr->N_x); AvgNumElements = (uint8_t)((ObjPtr->delta_y/SinoPtr->delta_t) + 2); AMatrixCol* VoxelLineResp_Y = (AMatrixCol*)get_spc(ObjPtr->N_y, sizeof(AMatrixCol)); for (t = 0; t < ObjPtr->N_y; t++){ VoxelLineResp_Y[t].values = (Real_t*)get_spc(AvgNumElements, sizeof(Real_t)); VoxelLineResp_Y[t].index = (int32_t*)get_spc(AvgNumElements, sizeof(int32_t)); } storeVoxelLineResponse(VoxelLineResp_Y, SinoPtr, ObjPtr->y0, ObjPtr->delta_y, ObjPtr->N_y); sprintf(phantom_file, "%s.bin", PHANTOM_MAGDENSITY_FILENAME); fp = fopen (phantom_file, "rb"); if (fp == NULL) { check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Error in reading file %s\n", phantom_file); exit(1); } size = (long int)ObjPtr->N_z*(long int)ObjPtr->N_y*(long int)ObjPtr->N_x*3; check_info(InpPtr->node_rank==0,InpPtr->debug_file_ptr, "Forward projecting mag phantom ...\n"); /* stream_offset = (long int)PHANTOM_OFFSET*(long int)ObjPtr->N_z*(long int)ObjPtr->N_y*(long int)ObjPtr->N_x*(long int)InpPtr->node_num; */ stream_offset = (long int)ObjPtr->N_z*(long int)ObjPtr->N_y*(long int)ObjPtr->N_x*(long int)InpPtr->node_rank; result = fseek (fp, 
stream_offset*sizeof(Real_arr_t), SEEK_SET); if (result != 0) { check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "ERROR: Error in seeking file %s, stream_offset = %ld\n",phantom_file,stream_offset); exit(1); } result = fread (&(magobject[0][0][0][0]), sizeof(Real_arr_t), size, fp); if (result != size) { check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "ERROR: Reading file %s, Number of elements read does not match required, number of elements read=%ld, stream_offset=%ld, size=%ld\n",phantom_file,result,stream_offset,size); exit(1); } fclose(fp); compute_magcrossprodtran (magobject, magpot, ObjPtr->MagFilt, fftptr, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 1); Write2Bin (PHANTOM_MAGDENSITY_FILENAME, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3, sizeof(Real_arr_t), &(magobject[0][0][0][0]), InpPtr->debug_file_ptr); Write2Bin (PHANTOM_MAGVECPOT_FILENAME, ObjPtr->N_z, ObjPtr->N_y, ObjPtr->N_x, 3, sizeof(Real_arr_t), &(magpot[0][0][0][0]), InpPtr->debug_file_ptr); #pragma omp parallel for private(i,j,k,slice,idx,val,m,n,data_idx) for (i=0; i<SinoPtr->Nx_p; i++){ AMatrixCol AMatrix; AMatrix.values = (Real_t*)get_spc((int32_t)AvgNumXElements,sizeof(Real_t)); AMatrix.index = (int32_t*)get_spc((int32_t)AvgNumXElements,sizeof(int32_t)); for (j=0; j<ObjPtr->N_z; j++) for (k=0; k<ObjPtr->N_y; k++){ detdist_r = (ObjPtr->y0 + ((Real_t)k+0.5)*ObjPtr->delta_y)*SinoPtr->cosine_x[i]; detdist_r += -(ObjPtr->z0 + ((Real_t)j+0.5)*ObjPtr->delta_z)*SinoPtr->sine_x[i]; calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_x[i], &(AMatrix), detdist_r); for (slice=0; slice<ObjPtr->N_x; slice++){ for (m=0; m<AMatrix.count; m++){ idx=AMatrix.index[m]; val=AMatrix.values[m]; for (n=0; n<VoxelLineResp_X[slice].count; n++) { data_idx = i*SinoPtr->N_t*SinoPtr->N_r + idx*SinoPtr->N_t + VoxelLineResp_X[slice].index[n]; data_unflip_x[data_idx] += val*MagPhaseMultiple*VoxelLineResp_X[slice].values[n]*magpot[j][k][slice][0]*SinoPtr->cosine_x[i]; data_unflip_x[data_idx] += 
val*MagPhaseMultiple*VoxelLineResp_X[slice].values[n]*magpot[j][k][slice][1]*(-SinoPtr->sine_x[i]); } } } } } #pragma omp parallel for private(i,j,k,slice,idx,val,m,n,data_idx) for (i=0; i<SinoPtr->Ny_p; i++){ AMatrixCol AMatrix; AMatrix.values = (Real_t*)get_spc((int32_t)AvgNumXElements,sizeof(Real_t)); AMatrix.index = (int32_t*)get_spc((int32_t)AvgNumXElements,sizeof(int32_t)); for (j=0; j<ObjPtr->N_z; j++) for (k=0; k<ObjPtr->N_x; k++){ detdist_r = (ObjPtr->x0 + ((Real_t)k+0.5)*ObjPtr->delta_x)*SinoPtr->cosine_y[i]; detdist_r += -(ObjPtr->z0 + ((Real_t)j+0.5)*ObjPtr->delta_z)*SinoPtr->sine_y[i]; calcAMatrixColumnforAngle(SinoPtr, ObjPtr, SinoPtr->DetectorResponse_y[i], &(AMatrix), detdist_r); for (slice=0; slice<ObjPtr->N_y; slice++){ for (m=0; m<AMatrix.count; m++){ idx=AMatrix.index[m]; val=AMatrix.values[m]; for (n=0; n<VoxelLineResp_Y[slice].count; n++) { data_idx = i*SinoPtr->N_t*SinoPtr->N_r + VoxelLineResp_Y[slice].index[n]*SinoPtr->N_r + idx; data_unflip_y[data_idx] += val*MagPhaseMultiple*VoxelLineResp_Y[slice].values[n]*magpot[j][slice][k][0]*SinoPtr->cosine_y[i]; data_unflip_y[data_idx] += val*MagPhaseMultiple*VoxelLineResp_Y[slice].values[n]*magpot[j][slice][k][2]*(-SinoPtr->sine_y[i]); } } } } free(AMatrix.values); free(AMatrix.index); } sigpwr = 0; for (i=0; i<SinoPtr->Nx_p*SinoPtr->N_r*SinoPtr->N_t; i++){ sigpwr += data_unflip_x[i]*data_unflip_x[i]; data_unflip_x[i] += sqrt(1.0/InpPtr->Weight)*normal(); /* printf("weight = %f, const = %f, noise = %e\n", InpPtr->Weight, sqrt(1.0/InpPtr->Weight), normal());*/ } for (i=0; i<SinoPtr->Ny_p*SinoPtr->N_r*SinoPtr->N_t; i++){ sigpwr += data_unflip_y[i]*data_unflip_y[i]; data_unflip_y[i] += sqrt(1.0/InpPtr->Weight)*normal(); } sigpwr /= (SinoPtr->N_r*SinoPtr->N_t*(SinoPtr->Nx_p+SinoPtr->Ny_p)); check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Added noise to data ........\n"); check_info(InpPtr->node_rank==0, InpPtr->debug_file_ptr, "Average signal power = %e, noise variance = %e, SNR = %e\n", 
sigpwr, 1.0/InpPtr->Weight, sigpwr/(1.0/InpPtr->Weight)); if (InpPtr->Write2Tiff == 1) { Real_arr_t* tifarray = (Real_arr_t*)get_spc(SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r, sizeof(Real_arr_t)); size = SinoPtr->Nx_p*SinoPtr->N_t*SinoPtr->N_r; dimTiff[0] = 1; dimTiff[1] = SinoPtr->Nx_p; dimTiff[2] = SinoPtr->N_r; dimTiff[3] = SinoPtr->N_t; for (i = 0; i < size; i++) tifarray[i] = data_unflip_x[i]; WriteMultiDimArray2Tiff ("sim_data_x", dimTiff, 0, 1, 2, 3, tifarray, 0, 0, 1, InpPtr->debug_file_ptr); size = SinoPtr->Ny_p*SinoPtr->N_t*SinoPtr->N_r; dimTiff[0] = 1; dimTiff[1] = SinoPtr->Ny_p; dimTiff[2] = SinoPtr->N_t; dimTiff[3] = SinoPtr->N_r; for (i = 0; i < size; i++) tifarray[i] = data_unflip_y[i]; WriteMultiDimArray2Tiff ("sim_data_y", dimTiff, 0, 1, 2, 3, tifarray, 0, 0, 1, InpPtr->debug_file_ptr); free(tifarray); tifarray = (Real_arr_t*)get_spc(ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x*3, sizeof(Real_arr_t)); size = ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x*3; dimTiff[0] = ObjPtr->N_z; dimTiff[1] = ObjPtr->N_y; dimTiff[2] = ObjPtr->N_x; dimTiff[3] = 3; objptr = &(magobject[0][0][0][0]); for (i = 0; i < size; i++) tifarray[i] = objptr[i]; WriteMultiDimArray2Tiff (PHANTOM_MAGDENSITY_FILENAME, dimTiff, 0, 3, 1, 2, tifarray, 0, 0, 1, InpPtr->debug_file_ptr); objptr = &(magpot[0][0][0][0]); for (i = 0; i < size; i++) tifarray[i] = objptr[i]; WriteMultiDimArray2Tiff (PHANTOM_MAGVECPOT_FILENAME, dimTiff, 0, 3, 1, 2, tifarray, 0, 0, 1, InpPtr->debug_file_ptr); free(tifarray); } /* size = ObjPtr->N_z*ObjPtr->N_y*ObjPtr->N_x; write_SharedBinFile_At ("mag_phantom", &(realmagobject[0][0][0]), InpPtr->node_rank*size, size, InpPtr->debug_file_ptr); write_SharedBinFile_At ("phase_phantom", &(realphaseobject[0][0][0]), InpPtr->node_rank*size, size, InpPtr->debug_file_ptr);*/ for (t = 0; t < ObjPtr->N_x; t++){ free(VoxelLineResp_X[t].values); free(VoxelLineResp_X[t].index); } free(VoxelLineResp_X); for (t = 0; t < ObjPtr->N_y; t++){ free(VoxelLineResp_Y[t].values); 
free(VoxelLineResp_Y[t].index); } free(VoxelLineResp_Y); multifree(SinoPtr->DetectorResponse_x,2); multifree(SinoPtr->DetectorResponse_y,2); free(SinoPtr->ZLineResponse); multifree(magobject,4); multifree(magpot,4); /*multifree(realmagobject,3); multifree(realphaseobject,3);*/ /* multifree(fftforw_freq, 3); multifree(fftback_freq, 3); */ return (0); }
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_dense_subassign_23_template.c
//------------------------------------------------------------------------------
// GB_dense_subassign_23_template: C += B where C is dense; B is sparse or dense
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// All entries in C+=B are computed fully in parallel, using the same kind of
// parallelism as Template/GB_AxB_colscale.c.

// NOTE(review): this file is a template, #include'd into a type- and
// operator-specific kernel.  GB_CTYPE, GB_BTYPE, GB_GETB, GB_BINOP,
// GB_CX, GB_CBLAS_AXPY, and the variables kfirst_slice / klast_slice /
// pstart_slice / ntasks / nthreads are all expected to be defined by the
// including translation unit -- confirm against the caller.

#include "GB_unused.h"

{

    //--------------------------------------------------------------------------
    // get C and B
    //--------------------------------------------------------------------------

    const GB_BTYPE *GB_RESTRICT Bx = (GB_BTYPE *) B->x ;
    GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ;
    ASSERT (GB_is_dense (C)) ;

    // kfirst_slice == NULL signals that no task slicing was computed; in that
    // case B is dense as well (asserted below) and C += B is a flat vector op.
    if (kfirst_slice == NULL)
    {

        //----------------------------------------------------------------------
        // C += B when both C and B are dense
        //----------------------------------------------------------------------

        ASSERT (GB_is_dense (B)) ;
        const int64_t cnz = GB_NNZ (C) ;

        #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL

            // C += B via GB_cblas_daxpy or GB_cblas_saxpy
            GB_CBLAS_AXPY           // Y += alpha*X
            (
                cnz,                // length of X and Y (note: int64_t)
                (GB_CTYPE) 1,       // alpha is 1.0
                Bx,                 // X, always stride 1
                Cx,                 // Y, always stride 1
                nthreads            // maximum # of threads to use
            ) ;

        #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL

            // C -= B via GB_cblas_daxpy or GB_cblas_saxpy
            GB_CBLAS_AXPY           // Y += alpha*X
            (
                cnz,                // length of X and Y (note: int64_t)
                (GB_CTYPE) -1,      // alpha is -1.0
                Bx,                 // X, always stride 1
                Cx,                 // Y, always stride 1
                nthreads            // maximum # of threads to use
            ) ;

        #else

            // generic case: apply the binary operator entrywise in parallel
            int64_t p ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (p = 0 ; p < cnz ; p++)
            {
                GB_GETB (bij, Bx, p) ;                  // bij = B(i,j)
                GB_BINOP (GB_CX (p), GB_CX (p), bij) ;  // C(i,j) += bij
            }

        #endif
    }
    else
    {

        //----------------------------------------------------------------------
        // C += B when C is dense and B is sparse
        //----------------------------------------------------------------------

        const int64_t *GB_RESTRICT Bp = B->p ;
        const int64_t *GB_RESTRICT Bh = B->h ;
        const int64_t *GB_RESTRICT Bi = B->i ;
        const int64_t cvlen = C->vlen ;

        // dynamic schedule: tasks cover unequal amounts of B's entries
        int taskid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            // if kfirst > klast then taskid does no work at all
            int64_t kfirst = kfirst_slice [taskid] ;
            int64_t klast  = klast_slice  [taskid] ;

            //------------------------------------------------------------------
            // C(:,kfirst:klast) += B(:,kfirst:klast)
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // find the part of B(:,k) and C(:,k) for this task
                //--------------------------------------------------------------

                // Bh non-NULL means B is hypersparse: k indexes Bh, which
                // holds the actual column index j.
                int64_t j = (Bh == NULL) ? k : Bh [k] ;
                int64_t my_pB_start, my_pB_end ;
                GB_get_pA_and_pC (&my_pB_start, &my_pB_end, NULL,
                    taskid, k, kfirst, klast, pstart_slice, NULL, NULL, Bp) ;

                int64_t pB_start = Bp [k] ;
                // B(:,k) is dense iff it holds exactly cvlen entries
                bool ajdense = ((Bp [k+1] - pB_start) == cvlen) ;

                // pC points to the start of C(:,j) if C is dense
                int64_t pC = j * cvlen ;

                //--------------------------------------------------------------
                // C(:,j) += B(:,j)
                //--------------------------------------------------------------

                if (ajdense)
                {

                    //----------------------------------------------------------
                    // both C(:,j) and B(:,j) are dense
                    //----------------------------------------------------------

                    #if defined ( GB_HAS_CBLAS ) && GB_OP_IS_PLUS_REAL

                        // y += x via GB_cblas_daxpy or GB_cblas_saxpy.
                        // use a single thread since this is already in a
                        // parallel region.

                        int64_t len = my_pB_end - my_pB_start ;
                        int64_t i = my_pB_start - pB_start ;
                        int64_t p = pC + i ;
                        GB_CBLAS_AXPY           // Y += alpha*X
                        (
                            len,                // length of X and Y
                            (GB_CTYPE) 1,       // alpha is 1.0
                            Bx + my_pB_start,   // X, always stride 1
                            Cx + p,             // Y, always stride 1
                            1                   // use a single thread
                        ) ;

                    #elif defined ( GB_HAS_CBLAS ) && GB_OP_IS_MINUS_REAL

                        // y -= x via GB_cblas_daxpy or GB_cblas_saxpy.
                        // use a single thread since this is already in a
                        // parallel region.

                        int64_t len = my_pB_end - my_pB_start ;
                        int64_t i = my_pB_start - pB_start ;
                        int64_t p = pC + i ;
                        GB_CBLAS_AXPY           // Y += alpha*X
                        (
                            len,                // length of X and Y
                            (GB_CTYPE) -1,      // alpha is -1.0
                            Bx + my_pB_start,   // X, always stride 1
                            Cx + p,             // Y, always stride 1
                            1                   // use a single thread
                        ) ;

                    #else

                        // dense column: row index i follows from position pB
                        GB_PRAGMA_SIMD_VECTORIZE
                        for (int64_t pB = my_pB_start ; pB < my_pB_end ; pB++)
                        {
                            int64_t i = pB - pB_start ;
                            int64_t p = pC + i ;
                            // bij = B(i,j)
                            GB_GETB (bij, Bx, pB) ;
                            // C(i,j) += bij
                            GB_BINOP (GB_CX (p), GB_CX (p), bij) ;
                        }

                    #endif

                }
                else
                {

                    //----------------------------------------------------------
                    // C(:,j) is dense; B(:,j) is sparse
                    //----------------------------------------------------------

                    // sparse column: row index i must be read from Bi
                    GB_PRAGMA_SIMD_VECTORIZE
                    for (int64_t pB = my_pB_start ; pB < my_pB_end ; pB++)
                    {
                        int64_t i = Bi [pB] ;
                        int64_t p = pC + i ;
                        GB_GETB (bij, Bx, pB) ;                 // bij = B(i,j)
                        GB_BINOP (GB_CX (p), GB_CX (p), bij) ;  // C(i,j) += bij
                    }
                }
            }
        }
    }
}
mapping.c
#define _MODULE_MAPPING #define USE_PREFETCH #include <omp.h> #ifdef USE_PREFETCH #include <xmmintrin.h> #endif #include <limits.h> #include "mapping.h" #include "output.h" #include "../common/sw-full-common.h" #include "../common/sw-full-cs.h" #include "../common/sw-full-ls.h" #include "../common/sw-vector.h" #include "../common/read_hit_heap.h" #include "../common/sw-post.h" DEF_HEAP(uint32_t, uint, uu) DEF_HEAP(double, struct read_hit_holder, unpaired) DEF_HEAP(double, struct read_hit_pair_holder, paired) #define RG_GET_MAP_ID(c) ( (c) >> 3 ) #define RG_SET_MAP_ID(c, id) (c) = ( (id) << 3 ) + 0x6; #define RG_GET_HAS_2(c) ( ( (c) & 0x1 ) != 0 ) #define RG_SET_HAS_2(c) (c) |= 0x1 #define RG_VALID_MP_CNT(c) ( ( (c) & 0x6 ) != 0x6 ) #define RG_GET_MP_CNT(c) ( ( (c) >> 1 ) & 0x3 ) #define RG_SET_MP_CNT(c, cnt) (c) &= ~(0x6); (c) |= ( (cnt) << 1 ) /* * Mapping routines */ static void read_get_mapidxs_per_strand(struct read_entry * re, int st) { int i, sn, load, base, r_idx; //uint32_t * kmerWindow = (uint32_t *)xcalloc(sizeof(kmerWindow[0]) * BPTO32BW(max_seed_span)); uint32_t kmerWindow[BPTO32BW(max_seed_span)]; assert(re != NULL); assert(re->mapidx[st] == NULL); #ifdef ENABLE_LOW_QUALITY_FILTER if (re->filter_qual == NULL) { re->filter_qual = (char *)xmalloc(strlen(re->qual) + 17); read_quality_filter_preprocess (re->qual, re->filter_qual); } #endif //re->mapidx[st] = (uint32_t *)xmalloc(n_seeds * re->max_n_kmers * sizeof(re->mapidx[0][0])); re->mapidx[st] = (uint32_t *) my_malloc(n_seeds * re->max_n_kmers * sizeof(re->mapidx[0][0]), &mem_mapping, "mapidx [%s]", re->name); load = 0; for (i = 0; i < re->read_len; i++) { base = EXTRACT(re->read[st], i); bitfield_prepend(kmerWindow, max_seed_span, base); if (load < max_seed_span) load++; for (sn = 0; sn < n_seeds; sn++) { r_idx = i - seed[sn].span + 1; #ifdef ENABLE_SEED_POSITIONS if (r_idx < re->min_kmer_pos || !(bitmap_long_extract(seed[sn].positions, 1, MAX_SEED_POSITIONS_BITMAP_SIZE, r_idx))) { continue; } #else if 
(r_idx < re->min_kmer_pos) { continue; } #endif #ifdef ENABLE_LOW_QUALITY_FILTER if (Qflag && SQFflag && is_low_quality_read_subsequence(re->filter_qual, r_idx, seed[sn])) { re->mapidx[st][sn*re->max_n_kmers + (r_idx - re->min_kmer_pos)] = 0; continue; } #endif re->mapidx[st][sn*re->max_n_kmers + (r_idx - re->min_kmer_pos)] = KMER_TO_MAPIDX(kmerWindow, sn); } } //free(kmerWindow); } /* * Extract spaced kmers from read, save them in re->mapidx. */ static inline void read_get_mapidxs(struct read_entry * re) { read_get_mapidxs_per_strand(re, 0); read_get_mapidxs_per_strand(re, 1); #ifdef DEBUG_KMERS { uint sn, i, j; fprintf(stderr, "max_n_kmers:%u, min_kmer_pos:%u\n", re->max_n_kmers, re->min_kmer_pos); fprintf(stderr, "collapsed kmers from read:\n"); for (sn = 0; sn < n_seeds; sn++) { fprintf(stderr, "sn:%u\n", sn); for (i = 0; re->min_kmer_pos + i + seed[sn].span <= re->read_len; i++) { fprintf(stderr, "\tpos:%u kmer:", re->min_kmer_pos + i); for (j = 0; j < seed[sn].weight; j++) { fprintf(stderr, "%c%s", base_translate((re->mapidx[0][sn*re->max_n_kmers + i] >> 2*(seed[sn].weight - 1 - j)) & 0x3, shrimp_mode == MODE_COLOUR_SPACE), j < seed[sn].weight - 1? "," : "\n"); } } } fprintf(stderr, "collapsed kmers from read_rc:\n"); for (sn = 0; sn < n_seeds; sn++) { fprintf(stderr, "sn:%u\n", sn); for (i = 0; re->min_kmer_pos + i + seed[sn].span <= re->read_len; i++) { fprintf(stderr, "\tpos:%u kmer:", re->min_kmer_pos + i); for (j = 0; j < seed[sn].weight; j++) { fprintf(stderr, "%c%s", base_translate((re->mapidx[1][sn*re->max_n_kmers + i] >> 2*(seed[sn].weight - 1 - j)) & 0x3, shrimp_mode == MODE_COLOUR_SPACE), j < seed[sn].weight - 1? 
"," : "\n"); } } } } #endif } /* static int bin_search(uint32_t * array, int l, int r, uint32_t value) { int m; while (l + 1 < r) { m = (l + r - 1)/2; if (array[m] < value) l = m + 1; else r = m + 1; } if (l < r && array[l] < value) return l + 1; else return l; } static void read_get_restricted_anchor_list_per_strand(struct read_entry * re, int st, bool collapse) { int i, j, offset, sn; llint g_start, g_end; int idx_start, idx_end, k; uint32_t mapidx; int anchor_cache[re->read_len]; uint diag; int l; assert(0); // unmaintained!! assert(re->mapidx[st] != NULL); re->n_anchors[st] = 0; if ((st == 0 && !Fflag) || (st == 1 && !Cflag)) return; for (j = 0; j < re->n_ranges; j++) { if (re->ranges[j].st != st) continue; g_start = (llint)contig_offsets[re->ranges[j].cn] + re->ranges[j].g_start; g_end = (llint)contig_offsets[re->ranges[j].cn] + re->ranges[j].g_end; for (sn = 0; sn < n_seeds; sn++) { for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) { offset = sn*re->max_n_kmers + i; mapidx = re->mapidx[st][offset]; idx_start = bin_search(genomemap[sn][mapidx], 0, (int)genomemap_len[sn][mapidx], g_start); idx_end = bin_search(genomemap[sn][mapidx], idx_start, (int)genomemap_len[sn][mapidx], g_end + 1); if (idx_start >= idx_end) continue; re->anchors[st] = (struct anchor *)xrealloc(re->anchors[st], sizeof(re->anchors[0][0]) * (re->n_anchors[st] + (idx_end - idx_start))); for (k = 0; idx_start + k < idx_end; k++) { re->anchors[st][re->n_anchors[st] + k].cn = re->ranges[j].cn; re->anchors[st][re->n_anchors[st] + k].x = genomemap[sn][mapidx][idx_start + k] - contig_offsets[re->ranges[j].cn]; re->anchors[st][re->n_anchors[st] + k].y = re->min_kmer_pos + i; re->anchors[st][re->n_anchors[st] + k].length = seed[sn].span; re->anchors[st][re->n_anchors[st] + k].weight = 1; } re->n_anchors[st] += (int)(idx_end - idx_start); } } } qsort(re->anchors[st], re->n_anchors[st], sizeof(re->anchors[0][0]), anchor_uw_cmp); if (collapse) { for (i = 0; i < re->read_len; i++) 
anchor_cache[i] = -1; for (k = 0, i = 0; i < re->n_anchors[st]; i++) { re->anchors[st][k] = re->anchors[st][i]; diag = (re->anchors[st][k].x + re->read_len - re->anchors[st][k].y) % re->read_len; l = anchor_cache[diag]; if (l >= 0 && re->anchors[st][l].cn == re->anchors[st][k].cn && anchor_uw_intersect(&re->anchors[st][l], &re->anchors[st][k])) { anchor_uw_join(&re->anchors[st][l], &re->anchors[st][k]); } else { anchor_cache[diag] = k; k++; } } re->n_anchors[st] = k; } } */ #if defined (DEBUG_HIT_LIST_CREATION) || defined (DEBUG_HIT_LIST_PAIR_UP) \ || defined (DEBUG_HIT_LIST_PASS1) || defined (DEBUG_HIT_LIST_PAIRED_HITS) static void dump_hit(struct read_hit * hit) { fprintf(stderr, "(cn:%d,st:%d,gen_st:%d,g_off:%lld,w_len:%d,scores:(wg=%d,vc=%d,fl=%d,poster=%.5g)," "matches:%d,pair_min:%d,pair_max:%d,anchor:(x=%lld,y=%lld,ln=%d,wd=%d))\n", hit->cn, hit->st, hit->gen_st, hit->g_off_pos_strand, hit->w_len, hit->score_window_gen, hit->score_vector, hit->score_full, hit->sfrp != NULL? hit->sfrp->posterior : -1.0, hit->matches, hit->pair_min, hit->pair_max, hit->anchor.x, hit->anchor.y, hit->anchor.length, hit->anchor.width); } #endif #if defined (DEBUG_HIT_LIST_CREATION) || defined (DEBUG_HIT_LIST_PAIR_UP) \ || defined (DEBUG_HIT_LIST_PASS1) static void dump_hit_list(struct read_entry * re, int st, bool only_paired, bool only_after_vector) { int i; for (i = 0; i < (int)re->n_hits[st]; i++) { if (only_paired && re->hits[st][i].pair_min < 0) continue; if (only_after_vector && re->hits[st][i].score_vector < 0) continue; dump_hit(&re->hits[st][i]); } } #endif /* * Reverse read hit. * * The 'st' strand of the read matches the 'gen_st' strand of the genome. Negate both. 
*/ static inline void reverse_hit(struct read_entry * re, struct read_hit * rh) { assert(re != NULL && rh != NULL); rh->g_off = genome_len[rh->cn] - rh->g_off - rh->w_len; anchor_reverse(&rh->anchor, rh->w_len, re->read_len); rh->gen_st = 1 - rh->gen_st; rh->st = 1 - rh->st; } static void readpair_pair_up_hits(struct read_entry * re1, struct read_entry * re2) { int st1, st2, i, j, k, l; for (st1 = 0; st1 < 2; st1++) { st2 = 1 - st1; // opposite strand j = 0; // invariant: matching hit at index j or larger for (i = 0; i < re1->n_hits[st1]; i++) { // find matching hit, if any while (j < re2->n_hits[st2] && (re2->hits[st2][j].cn < re1->hits[st1][i].cn // prev contig || (re2->hits[st2][j].cn == re1->hits[st1][i].cn // same contig, but too close && (int64_t)(re2->hits[st2][j].g_off ) < (int64_t)(re1->hits[st1][i].g_off) + (int64_t)re1->delta_g_off_min[st1] ) ) ) { j++; } k = j; while (k < re2->n_hits[st2] && re2->hits[st2][k].cn == re1->hits[st1][i].cn && (int64_t)(re2->hits[st2][k].g_off) <= (int64_t)(re1->hits[st1][i].g_off) + (int64_t)re1->delta_g_off_max[st1] ) { k++; } //fprintf(stderr,"DONE\n"); if (j == k) { //fprintf(stderr,"no paired hit\n"); continue; } re1->hits[st1][i].pair_min = j; re1->hits[st1][i].pair_max = k-1; for (l = j; l < k; l++) { if (re2->hits[st2][l].pair_min < 0) { re2->hits[st2][l].pair_min = i; } re2->hits[st2][l].pair_max = i; } } } #ifdef DEBUG_HIT_LIST_PAIR_UP fprintf(stderr, "Dumping hit list after pair-up for read:[%s]\n", re1->name); dump_hit_list(re1, 0, false, false); dump_hit_list(re1, 1, false, false); fprintf(stderr, ".. and read:[%s]\n", re2->name); dump_hit_list(re2, 0, false, false); dump_hit_list(re2, 1, false, false); #endif } /* * Run full SW filter on this hit. 
*/ static void hit_run_full_sw(struct read_entry * re, struct read_hit * rh, int thresh) { uint32_t * gen = NULL; assert(re != NULL && rh != NULL); /* if (rh->gen_st!=0) { fprintf(stderr,"[%s] rh->gen_st is %d\n%d and %d\n",re->name, rh->gen_st,min_insert_size,max_insert_size); } assert(rh->gen_st == 0); if (rh->st == re->input_strand) { gen = genome_contigs[rh->cn]; } else { reverse_hit(re, rh); //fprintf(stderr, "reverse_hit from hit_run_full_sw [%s]\n", re->name); gen = genome_contigs_rc[rh->cn]; } */ if (rh->st != re->input_strand) { reverse_hit(re, rh); } if (rh->gen_st == 0) { gen = genome_contigs[rh->cn]; } else { gen = genome_contigs_rc[rh->cn]; } // allocate sfrp struct assert(rh->sfrp == NULL); rh->sfrp = (struct sw_full_results *)my_calloc(sizeof(rh->sfrp[0]), &mem_mapping, "sfrp [%s]", re->name); rh->sfrp->in_use = false; rh->sfrp->mqv = 255; // unavailable #ifdef DEBUG_SW_FULL_CALLS fprintf(stderr, "SW full call: (name:[%s],cn:%d,st:%d,gen_st:%d,g_off:%lld,w_len:%d,anchor:(%lld,%lld,%d,%d))\n", re->name, rh->cn, rh->st, rh->gen_st, rh->g_off, rh->w_len, rh->anchor.x, rh->anchor.y, rh->anchor.length, rh->anchor.width); #endif if (shrimp_mode == MODE_COLOUR_SPACE) { sw_full_cs(gen, rh->g_off, rh->w_len, re->read[rh->st], re->read_len, re->initbp[rh->st], thresh, rh->sfrp, rh->gen_st && Tflag, genome_is_rna, &rh->anchor, 1,Gflag ? 0 : 1, re->crossover_score); } else { /* * The full SW in letter space assumes it's given the correct max score. * This might not be true just yet if we're using hashing&caching because * of possible hash collosions. */ rh->score_vector = sw_vector(gen, rh->g_off, rh->w_len, re->read[rh->st], re->read_len, NULL, -1, genome_is_rna); if (rh->score_vector >= thresh) { sw_full_ls(gen, rh->g_off, rh->w_len, re->read[rh->st], re->read_len, thresh, rh->score_vector, rh->sfrp, rh->gen_st && Tflag, &rh->anchor, 1, Gflag ? 
0 : 1); //assert(rh->sfrp->score == rh->score_vector); } else { // this wouldn't have passed the filter rh->sfrp->score = 0; } } rh->score_full = rh->sfrp->score; rh->pct_score_full = (1000 * 100 * rh->score_full)/rh->score_max; } int get_insert_size(struct read_hit *rh, struct read_hit * rh_mp) { if (rh_mp == NULL || rh == NULL || rh->cn != rh_mp->cn) { return 0; } //get the mate pair info int read_start_mp = rh_mp->sfrp->read_start+1; //1based int read_end_mp = read_start_mp + rh_mp->sfrp->rmapped -1; //1base int genome_length_mp = genome_len[rh_mp->cn]; int genome_start_mp; bool reverse_strand_mp = (rh_mp->gen_st ==1); if (!reverse_strand_mp) { genome_start_mp = rh_mp->sfrp->genome_start+1; // 0 based -> 1 based } else { int genome_right_most_coordinate = genome_length_mp - rh_mp->sfrp->genome_start; //rh->sfrp->deletions is deletions in the reference // This is when the read has extra characters that dont match into ref genome_start_mp = genome_right_most_coordinate - (read_end_mp - read_start_mp - rh_mp->sfrp->deletions + rh_mp->sfrp->insertions); } int genome_end_mp=genome_start_mp+rh_mp->sfrp->gmapped-1; //get the other hit info int genome_start; bool reverse_strand=(rh->gen_st == 1); if (!reverse_strand) { genome_start = rh->sfrp->genome_start+1; // 0 based -> 1 based } else { int read_start = rh->sfrp->read_start+1; //1based int read_end = read_start + rh->sfrp->rmapped -1; //1base int genome_length = genome_len[rh->cn]; int genome_right_most_coordinate = genome_length - rh->sfrp->genome_start; //rh->sfrp->deletions is deletions in the reference // This is when the read has extra characters that dont match into ref genome_start = genome_right_most_coordinate - (read_end - read_start - rh->sfrp->deletions + rh->sfrp->insertions); } int genome_end=genome_start+rh->sfrp->gmapped-1; int fivep = 0; int fivep_mp = 0; if (reverse_strand){ fivep = genome_end; } else { fivep = genome_start - 1; } if (reverse_strand_mp){ fivep_mp = genome_end_mp; } else { fivep_mp = 
genome_start_mp-1; } return (fivep_mp - fivep); } static void read_get_region_counts(struct read_entry * re, int st, struct regions_options * options) { int sn, i, offset, region; uint j; //llint before = gettimeinusecs(); //llint before = rdtsc(), after; TIME_COUNTER_START(tpg.region_counts_tc); int number_in_pair = re->first_in_pair? 0 : 1; assert(use_regions); if (region_map_id == 0) { region_map_id = 1; for (int _nip = 0; _nip < 2; _nip++) { for (int _st = 0; _st < 2; _st++) { //free(region_map[_nip][_st]); my_free(region_map[_nip][_st], n_regions * sizeof(region_map[0][0][0]), &mem_mapping, "region_map"); //region_map[_nip][_st] = (int32_t *)xcalloc(n_regions * sizeof(region_map[0][0][0])); region_map[_nip][_st] = (region_map_t *) my_calloc(n_regions * sizeof(region_map[0][0][0]), &mem_mapping, "region_map"); //memset(region_map[_nip][_st], 0, n_regions * sizeof(region_map[0][0][0])); } } } assert(region_map[0][0] != NULL); for (sn = 0; //(options->min_seed >= 0? options->min_seed : 0); sn <= n_seeds - 1; //(options->max_seed >= 0? 
options->max_seed: n_seeds - 1); sn++) { for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) { offset = sn*re->max_n_kmers + i; if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff) continue; for (j = 0; j < genomemap_len[sn][re->mapidx[st][offset]]; j++) { #ifdef USE_PREFETCH if (j + 4 < genomemap_len[sn][re->mapidx[st][offset]]) { int region_ahead = (int)(genomemap[sn][re->mapidx[st][offset]][j + 4] >> region_bits); _mm_prefetch((char *)&region_map[number_in_pair][st][region_ahead], _MM_HINT_T0); } #endif region = (int)(genomemap[sn][re->mapidx[st][offset]][j] >> region_bits); // BEGIN COPY if (RG_GET_MAP_ID(region_map[number_in_pair][st][region]) == region_map_id) { // a previous kmer set it, so there are >=2 kmers in this region RG_SET_HAS_2(region_map[number_in_pair][st][region]); } else { region_map[number_in_pair][st][region] = 0; // clear old entry RG_SET_MAP_ID(region_map[number_in_pair][st][region], region_map_id); // set new id } // END COPY // extend regions by region_overlap if ((genomemap[sn][re->mapidx[st][offset]][j] & ((1 << region_bits) - 1)) < (uint)region_overlap && region > 0) { region--; // BEGIN PASTE if (RG_GET_MAP_ID(region_map[number_in_pair][st][region]) == region_map_id) { // a previous kmer set it, so there are >=2 kmers in this region RG_SET_HAS_2(region_map[number_in_pair][st][region]); } else { region_map[number_in_pair][st][region] = 0; // clear old entry RG_SET_MAP_ID(region_map[number_in_pair][st][region], region_map_id); // set new id } // END PASTE } } } } //region_counts_usecs[omp_get_thread_num()] += gettimeinusecs() - before; //after = rdtsc(); //tpg.region_counts_ticks += MAX(after - before, 0); TIME_COUNTER_STOP(tpg.region_counts_tc); } static void read_get_mp_region_counts(struct read_entry * re, int st) { //llint before = rdtsc(), after; TIME_COUNTER_START(tpg.mp_region_counts_tc); int nip, sn, i, offset, region; int first, last, max, k; unsigned int j; nip = re->first_in_pair? 
0 : 1; for (sn = 0; sn < n_seeds; sn++) { for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) { offset = sn*re->max_n_kmers + i; if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff) continue; for (j = 0; j < genomemap_len[sn][re->mapidx[st][offset]]; j++) { #ifdef USE_PREFETCH if (j + 4 < genomemap_len[sn][re->mapidx[st][offset]]) { int region_ahead = (int)(genomemap[sn][re->mapidx[st][offset]][j + 4] >> region_bits); _mm_prefetch((char *)&region_map[nip][st][region_ahead], _MM_HINT_T0); _mm_prefetch((char *)&region_map[1-nip][1-st][region_ahead], _MM_HINT_T0); } #endif region = (int)(genomemap[sn][re->mapidx[st][offset]][j] >> region_bits); if (!RG_VALID_MP_CNT(region_map[nip][st][region])) { first = MAX(0, region + re->delta_region_min[st]); last = MIN(n_regions - 1, region + re->delta_region_max[st]); max = 0; for (k = first; k <= last && max < 2; k++) { if (RG_GET_MAP_ID(region_map[1-nip][1-st][k]) == region_map_id) { max = (RG_GET_HAS_2(region_map[1-nip][1-st][k]) ? 2 : 1); } } RG_SET_MP_CNT(region_map[nip][st][region], max); } if (region > 0 && (genomemap[sn][re->mapidx[st][offset]][j] & ((1 << region_bits) - 1)) < (uint)region_overlap) { region--; if (!RG_VALID_MP_CNT(region_map[nip][st][region])) { first = MAX(0, region + re->delta_region_min[st]); last = MIN(n_regions - 1, region + re->delta_region_max[st]); max = 0; for (k = first; k <= last && max < 2; k++) { if (RG_GET_MAP_ID(region_map[1-nip][1-st][k]) == region_map_id) { max = (RG_GET_HAS_2(region_map[1-nip][1-st][k]) ? 
2 : 1); } } RG_SET_MP_CNT(region_map[nip][st][region], max); } } } } } //after = rdtsc(); //tpg.mp_region_counts_ticks += MAX(after - before, 0); TIME_COUNTER_STOP(tpg.mp_region_counts_tc); } /* static void read_get_mp_region_counts_per_strand(struct read_entry * re, int st, struct read_entry * re_mp, struct regions_options * options) { int i, first, last, j, max; for (i = 0; i < n_regions; i++) { max = 0; first = i + re->delta_region_min[st]; if (first < 0) first = 0; last = i + re->delta_region_max[st]; if (last > n_regions - 1) last = n_regions - 1; for (j = first; j <= last; j++) { if (re_mp->region_map[1-st][0][j] == re_mp->region_map_id && re_mp->region_map[1-st][1][j] > max) max = re_mp->region_map[1-st][1][j]; } re->region_map[st][2][i] = max; } } static inline void read_get_mp_region_counts(struct read_entry * re, struct read_entry * re_mp, struct regions_options * options) { read_get_mp_region_counts_per_strand(re, 0, re_mp, options); read_get_mp_region_counts_per_strand(re, 1, re_mp, options); } */ static inline void advance_index_in_genomemap(struct read_entry * re, int st, struct anchor_list_options * options, uint * idx, uint max_idx, uint32_t * map, int * anchors_discarded) { //int first, last, max, k; int nip = re->first_in_pair? 
0 : 1; int count_main, count_mp; while (*idx < max_idx) { #ifdef USE_PREFETCH if (*idx + 2 < max_idx) { int region_ahead = (int)(map[*idx + 2] >> region_bits); _mm_prefetch((char *)&region_map[nip][st][region_ahead], _MM_HINT_T0); } #endif int region = (int)(map[*idx] >> region_bits); assert(RG_GET_MAP_ID(region_map[nip][st][region]) == region_map_id); // if necessary, compute the mp counts if (options->use_mp_region_counts != 0) { /* if (!RG_VALID_MP_CNT(region_map[nip][st][region])) { first = MAX(0, region + re->delta_region_min[st]); last = MIN(n_regions - 1, region + re->delta_region_max[st]); max = 0; for (k = first; k <= last && max < 2; k++) { if (RG_GET_MAP_ID(region_map[1-nip][1-st][k]) == region_map_id) { max = (RG_GET_HAS_2(region_map[1-nip][1-st][k]) ? 2 : 1); } } RG_SET_MP_CNT(region_map[nip][st][region], max); count_mp = max; } if (region > 0 && (map[*idx] & ((1 << region_bits) - 1)) < (uint)region_overlap) { region--; if (!RG_VALID_MP_CNT(region_map[nip][st][region])) { first = MAX(0, region + re->delta_region_min[st]); last = MIN(n_regions - 1, region + re->delta_region_max[st]); max = 0; for (k = first; k <= last && max < 2; k++) { if (RG_GET_MAP_ID(region_map[1-nip][1-st][k]) == region_map_id) { max = (RG_GET_HAS_2(region_map[1-nip][1-st][k]) ? 2 : 1); } } RG_SET_MP_CNT(region_map[nip][st][region], max); count_mp = max; } region++; } */ // BEGIN COPY count_main = (RG_GET_HAS_2(region_map[nip][st][region]) ? 2 : 1); count_mp = RG_GET_MP_CNT(region_map[nip][st][region]); if ((options->use_mp_region_counts == 1 && (count_main >= 2 && count_mp >= 2)) || (options->use_mp_region_counts == 2 && (count_main >= 2 || count_mp >= 2)) || (options->use_mp_region_counts == 3 && (count_mp >= 1 && (count_main + count_mp) >= 3)) ) break; // END COPY if (region > 0 && (map[*idx] & ((1 << region_bits) - 1)) < (uint)region_overlap) { region--; //BEGIN PASTE count_main = (RG_GET_HAS_2(region_map[nip][st][region]) ? 
2 : 1); count_mp = RG_GET_MP_CNT(region_map[nip][st][region]); if ((options->use_mp_region_counts == 1 && (count_main >= 2 && count_mp >= 2)) || (options->use_mp_region_counts == 2 && (count_main >= 2 || count_mp >= 2)) || (options->use_mp_region_counts == 3 && (count_mp >= 1 && (count_main + count_mp) >= 3)) ) break; //END PASTE } } else // don't use mp counts at all { if (RG_GET_HAS_2(region_map[nip][st][region])) break; if (region > 0 && (map[*idx] & ((1 << region_bits) - 1)) < (uint)region_overlap) { region--; if (RG_GET_HAS_2(region_map[nip][st][region])) break; } } /* if ((options->min_count[0] == 0 || options->min_count[0] <= (region_map[nip][st][region] & ((1 << 8) - 1))) && (options->max_count[0] == 0 || options->max_count[0] >= (region_map[nip][st][region] & ((1 << 8) - 1)))) { if (options->min_count[1] != 0 && options->max_count[1] != 0) break; if ((region_map[nip][st][region] >> 31) == 0) { // compute mp counts first = MAX(0, region + re->delta_region_min[st]); last = MIN(n_regions - 1, region + re->delta_region_max[st]); max = 0; for (k = first; k <= last; k++) { if (((region_map[1-nip][1-st][k] >> 16) & ((1 << region_map_id_bits) - 1)) == region_map_id && (region_map[1-nip][1-st][k] & ((1 << 8) - 1)) > max) { max = (region_map[1-nip][1-st][k] & ((1 << 8) - 1)); } } region_map[nip][st][region] |= (1 << 31); region_map[nip][st][region] |= (max << 8); } if ((options->min_count[1] == 0 || options->min_count[1] <= ((region_map[nip][st][region] >> 8) & ((1 << 8) - 1))) && (options->max_count[1] == 0 || options->max_count[1] >= ((region_map[nip][st][region] >> 8) & ((1 << 8) - 1))) ) break; } if ((map[*idx] & ((1 << region_bits) - 1)) < (uint)region_overlap && region > 0) { region--; // copy-paste from above -- SERIOUSLY if ((options->min_count[0] == 0 || options->min_count[0] <= (region_map[nip][st][region] & ((1 << 8) - 1))) && (options->max_count[0] == 0 || options->max_count[0] >= (region_map[nip][st][region] & ((1 << 8) - 1)))) { if 
(options->min_count[1] != 0 && options->max_count[1] != 0) break; if ((region_map[nip][st][region] >> 31) == 0) { // compute mp counts first = MAX(0, region + re->delta_region_min[st]); last = MIN(n_regions - 1, region + re->delta_region_max[st]); max = 0; for (k = first; k <= last; k++) { if (((region_map[1-nip][1-st][k] >> 16) & ((1 << region_map_id_bits) - 1)) == region_map_id && (region_map[1-nip][1-st][k] & ((1 << 8) - 1)) > max) { max = (region_map[1-nip][1-st][k] & ((1 << 8) - 1)); } } region_map[nip][st][region] |= (1 << 31); region_map[nip][st][region] |= (max << 8); } if ((options->min_count[1] == 0 || options->min_count[1] <= ((region_map[nip][st][region] >> 8) & ((1 << 8) - 1))) && (options->max_count[1] == 0 || options->max_count[1] >= ((region_map[nip][st][region] >> 8) & ((1 << 8) - 1))) ) break; } } */ (*anchors_discarded)++; (*idx)++; } } /* static void expand_anchor(read_entry * re, int st, anchor * a) { uint32_t * db = (shrimp_mode == MODE_LETTER_SPACE? genome_contigs[a->cn] : genome_cs_contigs[a->cn]); uint32_t * qr = re->read[st]; llint x = (llint)a->x - (llint)contig_offsets[a->cn]; int y = a->y; int i, sc; int mm_score = (shrimp_mode == MODE_LETTER_SPACE? mismatch_score : match_score + crossover_score); // first, compute the real number of matches assert(x + a->length <= genome_len[a->cn]); assert(y + a->length <= re->read_len); sc = 0; for (i = 0; i < a->length; i++) { sc += (EXTRACT(db, x + (llint)i) == EXTRACT(qr, y + i) ? match_score : mm_score); } // extend backward int min_i = 0; int max_sc = sc; for (i = -1; x + i >= 0 && y + i >= 0; i--) { sc += (EXTRACT(db, x + (llint)i) == EXTRACT(qr, y + i) ? match_score : mm_score); if (sc < 0 || sc < max_sc + 5 * mm_score) break; if (sc > max_sc) { min_i = i; max_sc = sc; } } // extend forward int max_i = a->length - 1; for (i = max_i + 1; x + i < genome_len[a->cn] && y + i < re->read_len; i++) { sc += (EXTRACT(db, x + i) == EXTRACT(qr, y + i) ? 
match_score : mm_score);
    if (sc < 0 || sc < max_sc + 5 * mm_score)
      break;
    if (sc > max_sc) {
      max_i = i;
      max_sc = sc;
    }
  }
  // adjust anchor
  a->x += min_i;
  a->y += min_i;
  a->length = max_i - min_i + 1;
  //if (a->length > 20) a->weight++;
  a->score = max_sc;
  assert(a->score <= a->length * match_score && a->score >= a->length * mm_score);
}
*/


/*
 * Build the anchor list for read re on strand st: merge the genomemap hit
 * lists of all seed/position kmers (a min-heap yields genome positions in
 * increasing order), optionally skipping positions whose region fails the
 * region-count filter, and optionally collapsing colinear anchors that fall
 * on the same diagonal.
 */
static void
read_get_anchor_list_per_strand(struct read_entry * re, int st, struct anchor_list_options * options)
{
  uint list_sz;
  uint offset;
  int i, sn;
  //uint * idx;
  struct heap_uu h;
  struct heap_uu_elem tmp;
  int anchor_cache[re->read_len];
  int anchors_discarded = 0;
  int big_gaps = 0;

  assert(re != NULL && options != NULL);
  assert(re->anchors[st] == NULL && re->n_anchors[st] == 0);

  if (re->mapidx[st] == NULL)
    return;

  // skip strands disabled on the command line
  if ((st == 0 && !Fflag) || (st == 1 && !Cflag))
    return;

  // compute estimate size of anchor list
  list_sz = 0;
  for (sn = 0; sn < n_seeds; sn++) {
    for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
      offset = sn*re->max_n_kmers + i;
      // kmers whose genome hit lists exceed the cutoff are ignored entirely
      if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff)
        continue;
      list_sz += genomemap_len[sn][re->mapidx[st][offset]];
    }
  }
  stat_add(&tpg.anchor_list_init_size, list_sz);

  // init anchor list
  //re->anchors[st] = (struct anchor *)xmalloc(list_sz * sizeof(re->anchors[0][0]));
  re->anchors[st] = (struct anchor *)
    my_malloc(list_sz * sizeof(re->anchors[0][0]),
              &mem_mapping, "anchors [%s]", re->name);

  // init min heap, indices in genomemap lists, and anchor_cache
  heap_uu_init(&h, n_seeds * re->max_n_kmers);
  //idx = (uint *)xcalloc(n_seeds * re->max_n_kmers * sizeof(idx[0]));
  //uint32_t * idx = (uint32_t *)
  //  my_calloc(n_seeds * re->max_n_kmers * sizeof(idx[0]), &mem_mapping, "idx for read [%s]", re->name);
  uint32_t idx[n_seeds * re->max_n_kmers];
  memset(idx, 0, n_seeds * re->max_n_kmers * sizeof(idx[0]));
  for (i = 0; i < re->read_len; i++)
    anchor_cache[i] = -1;

  // load initial anchors in min heap
  for (sn = 0; sn < n_seeds; sn++) {
    for (i = 0; re->min_kmer_pos + i + seed[sn].span - 1 < re->read_len; i++) {
#ifdef ENABLE_SEED_POSITIONS
      if (!(bitmap_long_extract(seed[sn].positions, 1, MAX_SEED_POSITIONS_BITMAP_SIZE, i))) {
        continue;
      }
#endif
#ifdef ENABLE_LOW_QUALITY_FILTER
      if (Qflag && SQFflag && is_low_quality_read_subsequence(re->filter_qual, i, seed[sn])) {
        continue;
      }
#endif
      offset = sn*re->max_n_kmers + i;
      // mark over-cutoff lists as already exhausted
      if (genomemap_len[sn][re->mapidx[st][offset]] > list_cutoff) {
        idx[offset] = genomemap_len[sn][re->mapidx[st][offset]];
      }
      if (options->use_region_counts) {
        advance_index_in_genomemap(re, st, options, &idx[offset],
                                   genomemap_len[sn][re->mapidx[st][offset]],
                                   genomemap[sn][re->mapidx[st][offset]],
                                   &anchors_discarded);
      }
      if (idx[offset] < genomemap_len[sn][re->mapidx[st][offset]]) {
        tmp.key = genomemap[sn][re->mapidx[st][offset]][idx[offset]];
        tmp.rest = offset;
        heap_uu_insert(&h, &tmp);
        idx[offset]++;
      }
    }
  }

  while (h.load > 0) {
    // extract min
    heap_uu_get_min(&h, &tmp);
    // add to anchor list
    offset = tmp.rest;
    sn = offset / re->max_n_kmers;
    i = offset % re->max_n_kmers;
    re->anchors[st][re->n_anchors[st]].x = tmp.key;
    re->anchors[st][re->n_anchors[st]].y = re->min_kmer_pos + i;
    re->anchors[st][re->n_anchors[st]].length = seed[sn].span;
    re->anchors[st][re->n_anchors[st]].width = 1;
    re->anchors[st][re->n_anchors[st]].weight = 1;
    get_contig_num(re->anchors[st][re->n_anchors[st]].x, &re->anchors[st][re->n_anchors[st]].cn);
    if (re->n_anchors[st] > 0
        && tmp.key - re->anchors[st][re->n_anchors[st] - 1].x >= anchor_list_big_gap)
      big_gaps++;
    re->n_anchors[st]++;

    if (options->collapse) {
      // check if current anchor intersects the cached one on the same diagonal
      uint diag = (re->anchors[st][re->n_anchors[st]-1].x + re->read_len - re->anchors[st][re->n_anchors[st]-1].y) % re->read_len;
      int j = anchor_cache[diag];
      if (j >= 0
          && re->anchors[st][j].cn == re->anchors[st][re->n_anchors[st]-1].cn
          //&& anchor_uw_intersect(&re->anchors[st][j], &re->anchors[st][re->n_anchors[st]-1])) {
          && anchor_uw_colinear(&re->anchors[st][j],
                                &re->anchors[st][re->n_anchors[st]-1])) {
        anchor_uw_join(&re->anchors[st][j], &re->anchors[st][re->n_anchors[st]-1]);
        re->n_anchors[st]--;
      } else {
        anchor_cache[diag] = re->n_anchors[st]-1;
      }
    }

    if (options->use_region_counts) {
      advance_index_in_genomemap(re, st, options, &idx[offset],
                                 genomemap_len[sn][re->mapidx[st][offset]],
                                 genomemap[sn][re->mapidx[st][offset]],
                                 &anchors_discarded);
    }
    // load next anchor for that seed/mapidx
    if (idx[offset] < genomemap_len[sn][re->mapidx[st][offset]]) {
      tmp.key = genomemap[sn][re->mapidx[st][offset]][idx[offset]];
      tmp.rest = offset;
      heap_uu_replace_min(&h, &tmp);
      idx[offset]++;
    } else {
      heap_uu_extract_min(&h, &tmp);
    }
  }
  heap_uu_destroy(&h);
  //free(idx);
  //my_free(idx, n_seeds * re->max_n_kmers * sizeof(idx[0]), &mem_mapping, "idx");

  // shrink the anchor array to the number of anchors actually kept
  re->anchors[st] = (struct anchor *)
    my_realloc(re->anchors[st], re->n_anchors[st] * sizeof(re->anchors[0][0]),
               list_sz * sizeof(re->anchors[0][0]),
               &mem_mapping, "anchors [%s]", re->name);
  //if (hack)
  //  for (i = 0; i < re->n_anchors[st]; i++) {
  //    expand_anchor(re, st, &re->anchors[st][i]);
  //  }

  stat_add(&tpg.n_anchors_discarded, anchors_discarded);
  stat_add(&tpg.n_big_gaps_anchor_list, big_gaps);
}

/*
 * Build the anchor lists for both strands of this read.
 */
static inline void
read_get_anchor_list(struct read_entry * re, struct anchor_list_options * options)
{
  //llint before = gettimeinusecs();
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.anchor_list_tc);

  read_get_anchor_list_per_strand(re, 0, options);
  read_get_anchor_list_per_strand(re, 1, options);

  //anchor_list_usecs[omp_get_thread_num()] += gettimeinusecs() - before;
  //after = rdtsc();
  //tpg.anchor_list_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.anchor_list_tc);
}

/*
 * Build the hit (candidate alignment window) list for read re on strand st
 * from its anchor list.
 */
static void
read_get_hit_list_per_strand(struct read_entry * re, int st, struct hit_list_options * options)
{
  llint goff, gstart, gend;
  int max_score, tmp_score = 0;
  int i, j, cn, max_idx;
  int w_len;
  int short_len = 0, long_len = 0;
  int heavy_mp = false; // init not needed
  int gap_open_score, gap_extend_score;
  struct
anchor a[3];

  assert(re != NULL && options != NULL);
  assert(re->hits[st] == NULL && re->n_hits[st] == 0);

  if (re->n_anchors[st] == 0)
    return;

  //re->hits[st] = (struct read_hit *)xcalloc(re->n_anchors[st] * sizeof(re->hits[0][0]));
  re->hits[st] = (struct read_hit *)
    my_calloc(re->n_anchors[st] * sizeof(re->hits[0][0]),
              &mem_mapping, "hits [%s]", re->name);

  for (i = 0; i < re->n_anchors[st]; i++) {
    // contig num of crt anchor
    cn = re->anchors[st][i].cn;

    // w_len
    w_len = re->window_len;
    if ((uint32_t)w_len > genome_len[cn])
      w_len = (int)genome_len[cn];

    // set gstart and gend
    gend = (re->anchors[st][i].x - contig_offsets[cn]) + re->read_len - 1 - re->anchors[st][i].y;
    if (gend > genome_len[cn] - 1) {
      gend = genome_len[cn] - 1;
    }
    if (gend >= re->window_len) {
      gstart = gend - re->window_len;
    } else {
      gstart = 0;
    }

    /*
     * Modes of matching:
     * - gapless: only extend current anchor; no window_gen_threshold check
     * - n=1: one kmer match & no window_gen_threshold check; or at least two matches
     * - n=2: at least two kmer matches & window_gen_threshold check
     * - n=3: one kmer & >=2 kmers for mp
     */
    max_idx = i;
    //if (!hack)
    max_score = re->anchors[st][i].length * match_score;
    //else
    //  max_score = re->anchors[st][i].score;

    if (options->match_mode == 3) {
      // does the mate-pair region count make this region "heavy"?
      int region = re->anchors[st][i].x >> region_bits;
      assert(RG_VALID_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]));
      heavy_mp = (RG_GET_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]) >= 2);
      if (!heavy_mp && region > 0
          && (re->anchors[st][i].x & ((1 << region_bits) - 1)) < (uint)region_overlap) {
        region--;
        assert(RG_VALID_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]));
        heavy_mp = (RG_GET_MP_CNT(region_map[re->first_in_pair? 0 : 1][st][region]) >= 2);
      }
    }

    if (!options->gapless) {
      // avoid single matches when n=2
      if ((options->match_mode == 2 || (options->match_mode == 3 && !heavy_mp))
          && re->anchors[st][i].weight == 1)
        // && !hack)
        max_score = -1;

      // pair the current anchor with every earlier anchor inside the window
      for (j = i - 1;
           j >= 0 && re->anchors[st][j].x >= (llint)contig_offsets[cn] + gstart;
           j--) {
        if (re->anchors[st][j].y >= re->anchors[st][i].y) {
          continue;
        }
        //if (hack
        //    && re->anchors[st][j].x == re->anchors[st][i].x
        //    && re->anchors[st][j].y == re->anchors[st][i].y)
        //  continue;

        if (re->anchors[st][i].x - (llint)contig_offsets[cn] - re->anchors[st][i].y
            > re->anchors[st][j].x - (llint)contig_offsets[cn] - re->anchors[st][j].y) {
          // deletion in read
          short_len = (int)(re->anchors[st][i].y - re->anchors[st][j].y) + re->anchors[st][i].length;
          long_len = (int)(re->anchors[st][i].x - re->anchors[st][j].x) + re->anchors[st][i].length;
          gap_open_score = a_gap_open_score;
          gap_extend_score = a_gap_extend_score;
        } else {
          // insertion in read
          short_len = (int)(re->anchors[st][i].x - re->anchors[st][j].x) + re->anchors[st][i].length;
          long_len = (int)(re->anchors[st][i].y - re->anchors[st][j].y) + re->anchors[st][i].length;
          gap_open_score = b_gap_open_score;
          gap_extend_score = b_gap_extend_score;
        }
        assert(long_len >= short_len);

        //if (!hack) {
        if (long_len > short_len) {
          tmp_score = short_len * match_score + b_gap_open_score
            + (long_len - short_len) * b_gap_extend_score;
        } else {
          tmp_score = short_len * match_score;
        }
        //} else {
        //  int missing_matches = abs(re->anchors[st][i].length + re->anchors[st][j].length - short_len);
        //  tmp_score = re->anchors[st][i].score + re->anchors[st][j].score
        //    + MAX(missing_matches - 5, 0) * (shrimp_mode == MODE_LETTER_SPACE? mismatch_score : match_score + crossover_score);
        //  if (long_len > short_len)
        //    tmp_score += gap_open_score + (long_len - short_len) * gap_extend_score;
        //}

        if (tmp_score > max_score) {
          max_idx = j;
          max_score = tmp_score;
        }
      }
    }

    if (options->gapless
        || options->match_mode == 1
        || (options->match_mode == 3 && heavy_mp)
        || max_score >= (int)abs_or_pct(options->threshold,
                                        (re->read_len < w_len ? re->read_len : w_len) * match_score)) {
      // set goff
      int x_len = (int)(re->anchors[st][i].x - re->anchors[st][max_idx].x) + re->anchors[st][i].length;

      if ((re->window_len - x_len)/2 < re->anchors[st][max_idx].x - contig_offsets[cn]) {
        goff = (re->anchors[st][max_idx].x - contig_offsets[cn]) - (re->window_len - x_len)/2;
      } else {
        goff = 0;
      }
      if (goff + w_len > genome_len[cn]) {
        goff = genome_len[cn] - w_len;
      }

      // compute anchor
      if (max_idx < i) {
        a[0] = re->anchors[st][i];
        anchor_to_relative(&a[0], contig_offsets[cn] + goff);
        a[1] = re->anchors[st][max_idx];
        anchor_to_relative(&a[1], contig_offsets[cn] + goff);
        anchor_join(a, 2, &a[2]);
      } else {
        a[2] = re->anchors[st][i];
        anchor_to_relative(&a[2], contig_offsets[cn] + goff);
      }

      // add hit
      re->hits[st][re->n_hits[st]].g_off = goff;
      re->hits[st][re->n_hits[st]].g_off_pos_strand = goff;
      re->hits[st][re->n_hits[st]].w_len = w_len;
      re->hits[st][re->n_hits[st]].cn = cn;
      re->hits[st][re->n_hits[st]].st = st;
      re->hits[st][re->n_hits[st]].gen_st = 0;
      re->hits[st][re->n_hits[st]].anchor = a[2];
      re->hits[st][re->n_hits[st]].score_window_gen = max_score;
      re->hits[st][re->n_hits[st]].matches = (options->gapless || max_idx == i?
                                              re->anchors[st][i].weight
                                              : re->anchors[st][i].weight + re->anchors[st][max_idx].weight);
      re->hits[st][re->n_hits[st]].score_vector = -1;
      re->hits[st][re->n_hits[st]].score_full = -1;
      re->hits[st][re->n_hits[st]].score_max = (re->read_len < w_len? re->read_len : w_len) * match_score;
      re->hits[st][re->n_hits[st]].pair_min = -1;
      re->hits[st][re->n_hits[st]].pair_max = -1;
      re->hits[st][re->n_hits[st]].mapping_quality = 255;
      re->hits[st][re->n_hits[st]].saved = 0;
      re->hits[st][re->n_hits[st]].paired_hit_idx = NULL;
      re->hits[st][re->n_hits[st]].n_paired_hit_idx = 0;
      re->n_hits[st]++;
    }
  }

  // sort list (there might be few misordered pairs because of the goff computation)
  for (i = 1; i < re->n_hits[st]; i++) {
    j = i;
    while (j >= 1
           && re->hits[st][j-1].cn == re->hits[st][i].cn
           && re->hits[st][j-1].g_off > re->hits[st][i].g_off)
      j--;
    if (j < i) {
      // shift elements at indexes j..i-1 higher
      struct read_hit tmp = re->hits[st][i];
      int k;
      for (k = i - 1; k >= j; k--)
        re->hits[st][k+1] = re->hits[st][k];
      re->hits[st][j] = tmp;
    }
  }

  // shrink the hit array to the number of hits actually created
  re->hits[st] = (struct read_hit *)
    my_realloc(re->hits[st], re->n_hits[st] * sizeof(re->hits[0][0]),
               re->n_anchors[st] * sizeof(re->hits[0][0]),
               &mem_mapping, "hits [%s]", re->name);
}

/*
 * Build the hit lists for both strands of this read.
 */
static inline void
read_get_hit_list(struct read_entry * re, struct hit_list_options * options)
{
  //llint before = gettimeinusecs();
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.hit_list_tc);

  read_get_hit_list_per_strand(re, 0, options);
  read_get_hit_list_per_strand(re, 1, options);

  //hit_list_usecs[omp_get_thread_num()] += gettimeinusecs() - before;
  //after = rdtsc();
  //tpg.hit_list_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.hit_list_tc);

#ifdef DEBUG_HIT_LIST_CREATION
  fprintf(stderr, "Dumping hit list after creation for read:[%s]\n", re->name);
  dump_hit_list(re, 0, false, false);
  dump_hit_list(re, 1, false, false);
#endif
}

/*
 * Run the vector (SIMD) filter on the hits of read re, strand st, skipping
 * hits whose window overlaps that of the last hit which passed the filter.
 */
static void
read_pass1_per_strand(struct read_entry * re, int st, struct pass1_options * options)
{
  int i;
  int last_good_cn = -1;
  unsigned int last_good_g_off = 0; // init not needed

  f1_hash_tag++;

  for (i = 0; i < re->n_hits[st]; i++) {
    if (options->only_paired && re->hits[st][i].pair_min < 0) {
      continue;
    }
    if (re->hits[st][i].matches < options->min_matches) {
      continue;
    }

    // if this hit is saved, leave it be, but update last_good
    if (re->hits[st][i].saved == 1) {
      last_good_cn = re->hits[st][i].cn;
      last_good_g_off = re->hits[st][i].g_off_pos_strand;
      continue;
    }

    // check window overlap
    if (last_good_cn >= 0
        && re->hits[st][i].cn == last_good_cn
        && re->hits[st][i].g_off_pos_strand + (unsigned int)abs_or_pct(options->window_overlap, re->window_len)
           <= last_good_g_off + re->window_len) {
      re->hits[st][i].score_vector = 0;
      re->hits[st][i].pct_score_vector = 0;
      continue;
    }

    if (re->hits[st][i].score_vector <= 0) {
      if (shrimp_mode == MODE_COLOUR_SPACE) {
        uint32_t ** gen_cs;
        uint32_t ** gen_ls;
        struct read_hit * rh = &re->hits[st][i];
        if (rh->st != re->input_strand)
          reverse_hit(re, &re->hits[st][i]);
        if (re->hits[st][i].gen_st == 0) {
          gen_cs = genome_cs_contigs;
          gen_ls = genome_contigs;
        } else {
          gen_cs = genome_cs_contigs_rc;
          gen_ls = genome_contigs_rc;
        }
        re->hits[st][i].score_vector = f1_run(gen_cs[re->hits[st][i].cn], genome_len[re->hits[st][i].cn],
                                              re->hits[st][i].g_off, re->hits[st][i].w_len,
                                              re->read[rh->st], re->read_len,
                                              re->hits[st][i].g_off + re->hits[st][i].anchor.x, re->hits[st][i].anchor.y,
                                              gen_ls[re->hits[st][i].cn], re->initbp[st], genome_is_rna,
                                              f1_hash_tag, options->gapless);
      } else {
        re->hits[st][i].score_vector = f1_run(genome_contigs[re->hits[st][i].cn], genome_len[re->hits[st][i].cn],
                                              re->hits[st][i].g_off, re->hits[st][i].w_len,
                                              re->read[st], re->read_len,
                                              re->hits[st][i].g_off + re->hits[st][i].anchor.x, re->hits[st][i].anchor.y,
                                              NULL, -1, genome_is_rna, f1_hash_tag, options->gapless);
      }
      re->hits[st][i].pct_score_vector = (1000 * 100 * re->hits[st][i].score_vector)/re->hits[st][i].score_max;
      if (re->hits[st][i].score_vector >= (int)abs_or_pct(options->threshold, re->hits[st][i].score_max)) {
        last_good_cn = re->hits[st][i].cn;
        last_good_g_off = re->hits[st][i].g_off_pos_strand;
      }
    }
  }
}

/*
 * Go through hit list, apply vector filter, and save top scores.
*/
static inline void
read_pass1(struct read_entry * re, struct pass1_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.pass1_tc);

  read_pass1_per_strand(re, 0, options);
  read_pass1_per_strand(re, 1, options);

  //after = rdtsc();
  //tpg.pass1_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.pass1_tc);

#ifdef DEBUG_HIT_LIST_PASS1
  fprintf(stderr, "Dumping hit list after pass1 for read:[%s]\n", re->name);
  dump_hit_list(re, 0, options->only_paired, false);
  dump_hit_list(re, 1, options->only_paired, false);
#endif
}

// bounded-size min-heap used to keep the top-scoring unpaired hits
#define EXTHEAP_unpaired_pass1_CMP(a, b) ((a)->pass1_key < (b)->pass1_key)
DEF_EXTHEAP(struct read_hit *,unpaired_pass1)

/*
 * Go through the list adding hits to the heap.
 */
static void
read_get_vector_hits(struct read_entry * re, struct read_hit * * a, int * load, struct pass1_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.get_vector_hits_tc);

  int st, i;

  assert(re != NULL && a != NULL && load != NULL && *load == 0);
  assert(pair_mode == PAIR_NONE || half_paired);

  for (st = 0; st < 2; st++) {
    assert(re->n_hits[st] == 0 || re->hits[st] != NULL);
    for (i = 0; i < re->n_hits[st]; i++) {
      if (re->hits[st][i].saved == 1)
        continue;
      // keep the hit if it passes the threshold and either the heap has room
      // or it beats the current heap minimum
      if (re->hits[st][i].score_vector >= (int)abs_or_pct(options->threshold, re->hits[st][i].score_max)
          && (*load < options->num_outputs
              || ( (IS_ABSOLUTE(options->threshold)
                    && re->hits[st][i].score_vector > a[0]->pass1_key)
                   || (!IS_ABSOLUTE(options->threshold)
                       && re->hits[st][i].pct_score_vector > a[0]->pass1_key)))) {
        //fprintf(stderr,"%d matches\n",re->hits[st][i].matches);
        re->hits[st][i].pass1_key = (IS_ABSOLUTE(options->threshold)?
                                     re->hits[st][i].score_vector : re->hits[st][i].pct_score_vector);
        //assert(re->hits[st][i].gen_st==0);
        if (*load < options->num_outputs)
          extheap_unpaired_pass1_insert(a, load, &re->hits[st][i]);
        else
          extheap_unpaired_pass1_replace_min(a, load, &re->hits[st][i]);
      }
    }
  }

  //after = rdtsc();
  //tpg.get_vector_hits_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.get_vector_hits_tc);
}

/*
// don't use this for qsort directly;
static inline int
read_hit_purealign_cmp(struct read_hit * rh1, struct read_hit * rh2)
{
  if (rh1->cn < rh2->cn) {
    return -1;
  } else if (rh1->cn == rh2->cn) {
    if (rh1->st < rh2->st) {
      return -1;
    } else if (rh1->st == rh2->st) {
      return rh1->g_off - rh2->g_off;
    }
  }
  return 1;
}

// don't use this for qsort directly;
static inline int
read_hit_overlap_cmp(struct read_hit * rh1, struct read_hit * rh2)
{
  if (rh1->cn < rh2->cn) {
    return -1;
  } else if (rh1->cn == rh2->cn) {
    if (rh1->st < rh2->st) {
      return -1;
    } else if (rh1->st == rh2->st) {
      if (rh1->g_off + rh1->w_len < rh2->g_off) {
        return -1;
      } else if (rh2->g_off + rh2->w_len < rh1->g_off) {
        return 1;
      } else {
        return 0;
      }
    }
  }
  return 1;
}

// sort by: conting number; then strand; then g_off
static int
pass2_read_hit_align_cmp(void const * e1, void const * e2)
{
  struct read_hit * rh1 = *(struct read_hit * *)e1;
  struct read_hit * rh2 = *(struct read_hit * *)e2;
  return read_hit_purealign_cmp(rh1, rh2);
}

// sort by: conting number; then strand; then g_off; but return 0 if overlapping
static int
pass2_read_hit_overlap_cmp(void const * e1, void const * e2)
{
  struct read_hit * rh1 = *(struct read_hit * *)e1;
  struct read_hit * rh2 = *(struct read_hit * *)e2;
  return read_hit_overlap_cmp(rh1, rh2);
}
*/

// sort by score
static int
pass2_read_hit_score_cmp(void const * e1, void const * e2)
{
  return (*(struct read_hit * *)e2)->pass2_key - (*(struct read_hit * *)e1)->pass2_key;
}

// order hits by (contig, genome strand, alignment start)
static inline int
pass2_read_hit_sfrp_gen_start_cmp_base(struct read_hit const * rh1, struct read_hit const * rh2)
{
  if (rh1->cn != rh2->cn)
    return rh1->cn - rh2->cn;
  if (rh1->gen_st != rh2->gen_st)
    return rh1->gen_st - rh2->gen_st;
  return rh1->sfrp->genome_start - rh2->sfrp->genome_start;
}

// order hits by (contig, genome strand, alignment end)
static inline int
pass2_read_hit_sfrp_gen_end_cmp_base(struct read_hit const * rh1, struct read_hit const * rh2)
{
  if (rh1->cn != rh2->cn)
    return rh1->cn - rh2->cn;
  if (rh1->gen_st != rh2->gen_st)
    return rh1->gen_st - rh2->gen_st;
  return (- rh1->sfrp->genome_start - rh1->sfrp->rmapped + rh1->sfrp->deletions - rh1->sfrp->insertions)
    - (- rh2->sfrp->genome_start - rh2->sfrp->rmapped + rh2->sfrp->deletions - rh2->sfrp->insertions);
}

static int
pass2_read_hit_sfrp_gen_start_cmp(void const * e1, void const * e2)
{
  return pass2_read_hit_sfrp_gen_start_cmp_base(*(struct read_hit * *)e1, *(struct read_hit * *)e2);
}

static int
pass2_read_hit_sfrp_gen_end_cmp(void const * e1, void const * e2)
{
  return pass2_read_hit_sfrp_gen_end_cmp_base(*(struct read_hit * *)e1, *(struct read_hit * *)e2);
}

// remove duplicate hits
static void
read_remove_duplicate_hits(struct read_hit * * hits_pass2, int * n_hits_pass2)
{
  int i, j, k, max, max_idx;

  //llint before = gettimeinusecs();
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.duplicate_removal_tc);

  /*
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_align_cmp);
  i = 0;
  k = 0;
  while (i < *n_hits_pass2) {
    max = hits_pass2[i]->pass2_key;
    max_idx = i;
    j = i + 1;
    while (j < *n_hits_pass2 && !pass2_read_hit_overlap_cmp(&hits_pass2[i], &hits_pass2[j])) {
      if (hits_pass2[j]->pass2_key > max) {
        max = hits_pass2[j]->pass2_key;
        max_idx = j;
      }
      j++;
    }
    if (max_idx != k) {
      hits_pass2[k] = hits_pass2[max_idx];
    }
    k++;
    i = j;
  }
  return k;
  */

  // keep only the best-scoring hit among runs with equal alignment start
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_sfrp_gen_start_cmp);
  i = 0;
  k = 0;
  while (i < *n_hits_pass2) {
    max = hits_pass2[i]->pass2_key;
    max_idx = i;
    j = i + 1;
    while (j < *n_hits_pass2 && !pass2_read_hit_sfrp_gen_start_cmp(&hits_pass2[i], &hits_pass2[j])) {
      if (hits_pass2[j]->pass2_key > max) {
        max = hits_pass2[j]->pass2_key;
        max_idx = j;
      }
      j++;
    }
    if (max_idx != k) {
      hits_pass2[k] = hits_pass2[max_idx];
    }
    k++;
    i = j;
  }
#pragma omp atomic
  total_dup_single_matches += (*n_hits_pass2) - k;
  *n_hits_pass2 = k;

  // and likewise among runs with equal alignment end
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_sfrp_gen_end_cmp);
  i = 0;
  k = 0;
  while (i < *n_hits_pass2) {
    max = hits_pass2[i]->pass2_key;
    max_idx = i;
    j = i + 1;
    while (j < *n_hits_pass2 && !pass2_read_hit_sfrp_gen_end_cmp(&hits_pass2[i], &hits_pass2[j])) {
      if (hits_pass2[j]->pass2_key > max) {
        max = hits_pass2[j]->pass2_key;
        max_idx = j;
      }
      j++;
    }
    if (max_idx != k) {
      hits_pass2[k] = hits_pass2[max_idx];
    }
    k++;
    i = j;
  }
#pragma omp atomic
  total_dup_single_matches += (*n_hits_pass2) - k;
  *n_hits_pass2 = k;

  //duplicate_removal_usecs[omp_get_thread_num()] += gettimeinusecs() - before;
  //after = rdtsc();
  //tpg.duplicate_removal_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.duplicate_removal_tc);
}

/*
 * Turn the raw Smith-Waterman score of a hit into a posterior-based score.
 */
static void
hit_run_post_sw(struct read_entry * re, struct read_hit * rh)
{
  //fprintf(stderr, "running post sw for read: [%s]\n", re->name);
  if (shrimp_mode == MODE_COLOUR_SPACE) {
    post_sw(re->read[rh->st], re->initbp[rh->st], re->qual, rh->sfrp);
  } else {
    // LS: cheat; reuse SW score to get posterior
    rh->sfrp->posterior = pow(2.0, ((double)rh->sfrp->score - (double)rh->sfrp->rmapped * (2.0 * score_alpha + score_beta))/score_alpha);
  }
  rh->sfrp->posterior_score = (int)rint(score_alpha * log(rh->sfrp->posterior) / log(2.0)
                                        + (double)rh->sfrp->rmapped * (2.0 * score_alpha + score_beta));
  if (rh->sfrp->posterior_score < 0)
    rh->sfrp->posterior_score = 0;
  rh->sfrp->pct_posterior_score = (1000 * 100 * rh->sfrp->posterior_score)/rh->score_max;
  rh->score_full = rh->sfrp->posterior_score;
  rh->pct_score_full = rh->sfrp->pct_posterior_score;
}

/*
 * Do a final pass for given read.
*/
static bool
read_pass2(struct read_entry * re,
           struct read_hit * * hits_pass1, int n_hits_pass1,
           struct read_hit * * hits_pass2, int * n_hits_pass2,
           struct pass2_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.pass2_tc);

  int i, cnt;

  assert(re != NULL && hits_pass1 != NULL && hits_pass2 != NULL);

  /* compute full alignment scores */
  for (i = 0; i < n_hits_pass1; i++) {
    struct read_hit * rh = hits_pass1[i];
    if (rh->score_full < 0 || rh->sfrp == NULL) {
      hit_run_full_sw(re, rh, (int)abs_or_pct(options->threshold, rh->score_max));
      if (compute_mapping_qualities && rh->score_full > 0) {
        hit_run_post_sw(re, rh);
        /*
        fprintf(stderr, "read:%s\tSW-prob:%g\tposterior:%g\n", re->name,
                pow(2.0, ((double)rh->sfrp->score - (double)rh->sfrp->rmapped * (2.0 * score_alpha + score_beta))/score_alpha)
                * pow(1.0 - pr_xover, rh->sfrp->rmapped - rh->sfrp->crossovers),
                rh->sfrp->posterior);
        */
      }
      rh->pass2_key = (IS_ABSOLUTE(options->threshold)? rh->score_full : (int)rh->pct_score_full);
    }
    if (rh->score_full >= abs_or_pct(options->threshold, rh->score_max)) {
      hits_pass2[*n_hits_pass2] = rh;
      (*n_hits_pass2)++;
    }
  }

#ifdef DEBUG_HIT_LIST_PASS2
  fprintf(stderr, "Dumping hit list after pass2 (before duplicates removal and sorting) for read:[%s]\n", re->name);
  for (i = 0; i < n_hits_pass1; i++) {
    dump_hit(hits_pass1[i]);
  }
#endif

  // remove duplicate hits
  read_remove_duplicate_hits(hits_pass2, n_hits_pass2);

  // sort by non-increasing score
  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_score_cmp);

  /*
  if (compute_mapping_qualities && *n_hits_pass2 > 0) {
    // compute Z
    re->mq_denominator = hits_pass2[0]->sfrp->posterior;
    for (i = 1; i < *n_hits_pass2 && hits_pass2[i]->score_full > hits_pass2[0]->score_full - score_difference_mq_cutoff; i++) {
      re->mq_denominator += hits_pass2[i]->sfrp->posterior;
    }
    // compute mapping qualities
    for (i = 0; i < *n_hits_pass2 && hits_pass2[i]->score_full > hits_pass2[0]->score_full - score_difference_mq_cutoff; i++) {
      hits_pass2[i]->mapping_quality = qv_from_pr_corr(hits_pass2[i]->sfrp->posterior / re->mq_denominator);
      if (hits_pass2[i]->mapping_quality >= 10) {
#pragma omp atomic
        total_reads_matched_conf++;
      }
    }
    for ( ; i < *n_hits_pass2; i++) {
      hits_pass2[i]->mapping_quality = 0;
    }
  }
  */

  // trim excess mappings
  if (*n_hits_pass2 > options->num_outputs)
    *n_hits_pass2 = options->num_outputs;

  // if strata is set, keep only top scoring hits
  if (options->strata && *n_hits_pass2 > 0) {
    for (i = 1; i < *n_hits_pass2 && hits_pass2[0]->score_full == hits_pass2[i]->score_full; i++);
    *n_hits_pass2 = i;
  }

  // drop reads with too many mappings
  if (*n_hits_pass2 > 0) {
    if (max_alignments == 0 || *n_hits_pass2 <= max_alignments) {
#pragma omp atomic
      total_reads_matched++;
    } else {
#pragma omp atomic
      total_reads_dropped++;
      *n_hits_pass2 = 0;
    }
  }

  // mark remaining hits as saved
  for (i = 0; i < *n_hits_pass2; i++) {
    hits_pass2[i]->saved = 1;
    hits_pass2[i]->sfrp->in_use = true;
  }

  // update counts
  re->final_matches += *n_hits_pass2;
#pragma omp atomic
  total_single_matches += re->final_matches;

  // check stop condition
  if (options->stop_count == 0)
    return true;
  for (i = 0, cnt = 0; i < *n_hits_pass2; i++) {
    if (hits_pass2[i]->score_full >= (int)abs_or_pct(options->stop_threshold, hits_pass2[i]->score_max)) {
      cnt++;
    }
  }

  //after = rdtsc();
  //tpg.pass2_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.pass2_tc);

  return cnt >= options->stop_count;
}

/*
 * Append the saved pass2 hits to the read's list of final unpaired hits.
 */
static void
read_save_final_hits(read_entry * re, read_hit * * hits_pass2, int n_hits_pass2)
{
  int i;

  // make room for new hits
  re->final_unpaired_hits = (read_hit *)
    my_realloc(re->final_unpaired_hits,
               (re->n_final_unpaired_hits + n_hits_pass2) * sizeof(re->final_unpaired_hits[0]),
               re->n_final_unpaired_hits * sizeof(re->final_unpaired_hits[0]),
               &mem_mapping, "final_unpaired_hits [%s]", re->name);
  for (i = 0; i < n_hits_pass2; i++) {
    memcpy(&re->final_unpaired_hits[re->n_final_unpaired_hits + i], hits_pass2[i], sizeof(*hits_pass2[0]));
    // erase sfrp structs to prevent them from being freed too early
    hits_pass2[i]->sfrp = NULL;
  }
  re->n_final_unpaired_hits += n_hits_pass2;
}

/*
 * Map one unpaired read: run each option set in turn (region counts, anchor
 * list, hit list, pass1 vector filter, pass2 full alignment) until one of
 * them declares the read done.
 */
void
handle_read(struct read_entry * re, struct read_mapping_options_t * options, int n_options)
{
  bool done;
  int option_index = 0;
  int i;
  struct read_hit * * hits_pass1 = NULL;
  struct read_hit * * hits_pass2 = NULL;
  int n_hits_pass1;
  int n_hits_pass2;
  llint before = gettimeinusecs();

  if (re->mapidx[0] == NULL) {
    read_get_mapidxs(re);
  }

  do {
    if (options[option_index].regions.recompute) {
      region_map_id++;
      region_map_id &= ((1 << region_map_id_bits) - 1);
      read_get_region_counts(re, 0, &options[option_index].regions);
      read_get_region_counts(re, 1, &options[option_index].regions);
    }

    if (options[option_index].anchor_list.recompute) {
      read_free_anchor_list(re, &mem_mapping);
      read_get_anchor_list(re, &options[option_index].anchor_list);
    }

    if (options[option_index].hit_list.recompute) {
      read_free_hit_list(re, &mem_mapping);
      read_get_hit_list(re, &options[option_index].hit_list);
    }

    if (options[option_index].pass1.recompute) {
      read_pass1(re, &options[option_index].pass1);
    }

    hits_pass1 = (struct read_hit * *)
      my_malloc(options[option_index].pass1.num_outputs * sizeof(hits_pass1[0]),
                &mem_mapping, "hits_pass1 [%s]", re->name);
    n_hits_pass1 = 0;
    read_get_vector_hits(re, hits_pass1, &n_hits_pass1, &options[option_index].pass1);

    hits_pass2 = (struct read_hit * *)
      my_malloc(options[option_index].pass1.num_outputs * sizeof(hits_pass2[0]),
                &mem_mapping, "hits_pass2 [%s]", re->name);
    n_hits_pass2 = 0;
    done = read_pass2(re, hits_pass1, n_hits_pass1, hits_pass2, &n_hits_pass2, &options[option_index].pass2);

    if (n_hits_pass2 > 0) {
      if (options[option_index].pass2.save_outputs)
        read_save_final_hits(re, hits_pass2, n_hits_pass2);
      else {
        read_output(re, hits_pass2, n_hits_pass2);
        for (i = 0; i < n_hits_pass2; i++)
          hits_pass2[i]->sfrp->in_use = false;
      }
      re->mapped = true;
    }

    // free pass1 structs
    for (i = 0; i < n_hits_pass1; i++)
      if (hits_pass1[i]->sfrp != NULL && !hits_pass1[i]->sfrp->in_use)
        free_sfrp(&hits_pass1[i]->sfrp, re, &mem_mapping);

    my_free(hits_pass1, options[option_index].pass1.num_outputs * sizeof(hits_pass1[0]),
            &mem_mapping, "hits_pass1 [%s]", re->name);
    my_free(hits_pass2, options[option_index].pass1.num_outputs * sizeof(hits_pass2[0]),
            &mem_mapping, "hits_pass2 [%s]", re->name);
  } while (!done && ++option_index < n_options);

  //if (options_index >= n_options) {
  // this read fell through all the option sets
  //}

  tpg.read_handle_usecs += gettimeinusecs() - before;

  if (pair_mode == PAIR_NONE) {
    if (aligned_reads_file != NULL && re->mapped) {
#pragma omp critical (aligned_reads_file)
      {
        fasta_write_read(aligned_reads_file, re);
      }
    }
    if ((unaligned_reads_file != NULL || sam_unaligned) && !re->mapped) {
#pragma omp critical (unaligned_reads_file)
      {
        if (unaligned_reads_file != NULL) {
          fasta_write_read(unaligned_reads_file, re);
        }
      }
      if (sam_unaligned) {
        hit_output(re, NULL, NULL, false, NULL, 0);
      }
    }
  }
}

// bounded-size min-heap used to keep the top-scoring hit pairs
#define EXTHEAP_paired_pass1_CMP(a, b) ((a).key < (b).key)
DEF_EXTHEAP(struct read_hit_pair, paired_pass1)

/*
 * Go through the hit lists, constructing paired hits.
*/
static void
readpair_get_vector_hits(struct read_entry * re1, struct read_entry * re2,
                         struct read_hit_pair * a, int * load,
                         struct pairing_options * options)
{
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.get_vector_hits_tc);

  int st1, st2, i, j;
  read_hit_pair tmp;

  assert(re1 != NULL && re2 != NULL && a != NULL);

  for (st1 = 0; st1 < 2; st1++) {
    st2 = 1 - st1; // opposite strand
    for (i = 0; i < re1->n_hits[st1]; i++) {
      if (re1->hits[st1][i].saved == 1)
        continue;
      if (re1->hits[st1][i].pair_min < 0)
        continue;
      // pair this hit with each mate hit in its precomputed pairing range
      for (j = re1->hits[st1][i].pair_min; j <= re1->hits[st1][i].pair_max; j++) {
        if (re2->hits[st2][j].saved == 1)
          continue;
        //if (re1->hits[st1][i].matches + re2->hits[st2][j].matches < options->min_num_matches)
        //  continue;
        tmp.score = re1->hits[st1][i].score_vector + re2->hits[st2][j].score_vector;
        tmp.score_max = re1->hits[st1][i].score_max + re2->hits[st2][j].score_max;
        tmp.pct_score = (1000 * 100 * tmp.score)/tmp.score_max;
        tmp.key = (IS_ABSOLUTE(options->pass1_threshold)? tmp.score : tmp.pct_score);
        tmp.improper_mapping = false;
        tmp.rh_idx[0] = -1;
        tmp.rh_idx[1] = -1;
        if (tmp.score >= (int)abs_or_pct(options->pass1_threshold, tmp.score_max)
            && (*load < options->pass1_num_outputs || tmp.key > a[0].key)) {
          tmp.rh[0] = &re1->hits[st1][i];
          tmp.rh[1] = &re2->hits[st2][j];
          //TODO HISTORGRAM IS OFF! NOT SAM
          tmp.insert_size = (int)(st1 == 0?
                                  re2->hits[st2][j].g_off - (re1->hits[st1][i].g_off + re1->hits[st1][i].w_len) :
                                  re1->hits[st1][i].g_off - (re2->hits[st2][j].g_off + re2->hits[st2][j].w_len));
          if (*load < options->pass1_num_outputs)
            extheap_paired_pass1_insert(a, load, tmp);
          else
            extheap_paired_pass1_replace_min(a, load, tmp);
        }
      }
    }
  }

  //after = rdtsc();
  //tpg.get_vector_hits_ticks += MAX(after - before, 0);
  TIME_COUNTER_STOP(tpg.get_vector_hits_tc);
}

/*
// sort by: contig number; then strand; then g_off of hit[0]
static int
pass2_readpair_hit0_align_cmp(void const * e1, void const * e2)
{
  struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
  struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
  return read_hit_purealign_cmp(rhp1->rh[0], rhp2->rh[0]);
}

// sort by: contig number; then strand; then g_off of hit[1]
static int
pass2_readpair_hit1_align_cmp(void const * e1, void const * e2)
{
  struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
  struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
  return read_hit_purealign_cmp(rhp1->rh[1], rhp2->rh[1]);
}

// sort by: contig number; then strand; then g_off of hit[0]; but return 0 if hit[0] overlapping
static int
pass2_readpair_hit0_overlap_cmp(void const * e1, void const * e2)
{
  struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
  struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
  return read_hit_overlap_cmp(rhp1->rh[0], rhp2->rh[0]);
}

// sort by: contig number; then strand; then g_off of hit[1]; but return 0 if hit[1] overlapping
static int
pass2_readpair_hit1_overlap_cmp(void const * e1, void const * e2)
{
  struct read_hit_pair * rhp1 = (struct read_hit_pair *)e1;
  struct read_hit_pair * rhp2 = (struct read_hit_pair *)e2;
  return read_hit_overlap_cmp(rhp1->rh[1], rhp2->rh[1]);
}
*/

// sort by score
static int
pass2_read_hit_pair_score_cmp(void const * e1, void const * e2)
{
  return ((struct read_hit_pair *)e2)->key - ((struct read_hit_pair *)e1)->key;
}

static int
pass2_readpair_hit0_sfrp_gen_start_cmp(void const * e1, void const * e2)
{
  return pass2_read_hit_sfrp_gen_start_cmp_base(((read_hit_pair *)e1)->rh[0], ((read_hit_pair *)e2)->rh[0]);
}

static int
pass2_readpair_hit1_sfrp_gen_start_cmp(void const * e1, void const * e2)
{
  return pass2_read_hit_sfrp_gen_start_cmp_base(((read_hit_pair *)e1)->rh[1], ((read_hit_pair *)e2)->rh[1]);
}

static int
pass2_readpair_hit0_sfrp_gen_end_cmp(void const * e1, void const * e2)
{
  return pass2_read_hit_sfrp_gen_end_cmp_base(((read_hit_pair *)e1)->rh[0], ((read_hit_pair *)e2)->rh[0]);
}

static int
pass2_readpair_hit1_sfrp_gen_end_cmp(void const * e1, void const * e2)
{
  return pass2_read_hit_sfrp_gen_end_cmp_base(((read_hit_pair *)e1)->rh[1], ((read_hit_pair *)e2)->rh[1]);
}

// order pairs by the pointer values of their two hits
static int
pass2_readpair_pointer_cmp(void const * e1, void const * e2)
{
  struct read_hit_pair * rhpp1 = (struct read_hit_pair *)e1;
  struct read_hit_pair * rhpp2 = (struct read_hit_pair *)e2;
  if (rhpp1->rh[0] < rhpp2->rh[0])
    return -1;
  else if (rhpp1->rh[0] > rhpp2->rh[0])
    return 1;
  else { // equal rh[0]
    if (rhpp1->rh[1] < rhpp2->rh[1])
      return -1;
    else if (rhpp1->rh[1] > rhpp2->rh[1])
      return 1;
  }
  return 0;
}

/*
 * Fill in a read_hit_pair from its two constituent hits: combined scores,
 * pass2 key, and signed insert size.
 */
static inline void
readpair_compute_paired_hit(struct read_hit * rh1, struct read_hit * rh2, bool threshold_is_absolute,
                            struct read_hit_pair * dest)
{
  dest->rh[0] = rh1;
  dest->rh[1] = rh2;
  dest->score_max = rh1->score_max + rh2->score_max;
  dest->score = rh1->score_full + rh2->score_full;
  dest->pct_score = (1000 * 100 * dest->score)/dest->score_max;
  dest->key = threshold_is_absolute? dest->score : dest->pct_score;
  //dest->insert_size = abs(get_insert_size(rh1, rh2));
  int ins_sz = get_insert_size(rh1, rh2);
  int sign;
  if (pair_mode == PAIR_OPP_IN || pair_mode == PAIR_COL_FW) {
    if (rh1->gen_st == 0)
      sign = +1;
    else
      sign = -1;
  } else { // PAIR_OPP_OUT, PAIR_COL_BW
    if (rh1->gen_st == 1)
      sign = +1;
    else
      sign = -1;
  }
  dest->insert_size = sign * ins_sz;
  dest->improper_mapping = false;
  //tmp.isize_score=expected_isize==-1 ? 0 : abs(tmp.isize-expected_isize);
}

/*
 * For runs of pairs whose hit[num_in_pair] compares equal under cmp, replace
 * that hit in every pair of the run by the best-scoring one, recomputing the
 * combined pair scores.
 */
static void
readpair_push_dominant_single_hits(struct read_hit_pair * hits_pass2, int * n_hits_pass2,
                                   bool threshold_is_absolute, int num_in_pair,
                                   int (*cmp)(void const *, void const *))
{
  int i, j, k, max, max_idx;

  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), cmp);
  i = 0;
  while (i < *n_hits_pass2) {
    max = hits_pass2[i].rh[num_in_pair]->score_full;
    max_idx = i;
    j = i + 1;
    while (j < *n_hits_pass2 && !cmp((void *)&hits_pass2[i], (void *)&hits_pass2[j])) {
      if (hits_pass2[j].rh[num_in_pair]->score_full > max) {
        max = hits_pass2[j].rh[num_in_pair]->score_full;
        max_idx = j;
      }
      j++;
    }
    for (k = i; k < j; k++) {
      if (k != max_idx) {
        hits_pass2[k].rh[num_in_pair] = hits_pass2[max_idx].rh[num_in_pair];
        readpair_compute_paired_hit(hits_pass2[k].rh[0], hits_pass2[k].rh[1], threshold_is_absolute, &hits_pass2[k]);
      }
    }
    i = j;
  }
}

// remove duplicate hits
static void
readpair_remove_duplicate_hits(struct read_hit_pair * hits_pass2, int * n_hits_pass2, bool threshold_is_absolute)
{
  /*
  int i, j, k, l, max, max_idx;

  qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_readpair_hit0_align_cmp);
  i = 0;
  k = 0;
  while (i < *n_hits_pass2) {
    j = i + 1;
    while (j < *n_hits_pass2 && !pass2_readpair_hit0_overlap_cmp(&hits_pass2[j-1], &hits_pass2[j])) {
      j++;
    }
    if (j > i + 1) {
      qsort(&hits_pass2[i], j - i, sizeof(hits_pass2[0]), pass2_readpair_hit1_align_cmp);
    }
    while (i < j) {
      max = hits_pass2[i].key;
      max_idx = i;
      l = i + 1;
      while (l < j && !pass2_readpair_hit1_overlap_cmp(&hits_pass2[max_idx], &hits_pass2[l])) {
        if (hits_pass2[l].key > max) {
          max = hits_pass2[l].key;
          max_idx = l;
        }
        l++;
      }
      if (max_idx != k) {
        hits_pass2[k] = hits_pass2[max_idx];
      }
      k++;
      i = l;
    }
  }
  return k;
  */

  int tmp;

  //llint before = gettimeinusecs();
  //llint before = rdtsc(), after;
  TIME_COUNTER_START(tpg.duplicate_removal_tc);

  readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 0, pass2_readpair_hit0_sfrp_gen_start_cmp);
readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 0, pass2_readpair_hit0_sfrp_gen_end_cmp); readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 1, pass2_readpair_hit1_sfrp_gen_start_cmp); readpair_push_dominant_single_hits(hits_pass2, n_hits_pass2, threshold_is_absolute, 1, pass2_readpair_hit1_sfrp_gen_end_cmp); qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_readpair_pointer_cmp); tmp = removedups(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_readpair_pointer_cmp); #pragma omp atomic total_dup_paired_matches += (*n_hits_pass2) - tmp; *n_hits_pass2 = tmp; //duplicate_removal_usecs[omp_get_thread_num()] += gettimeinusecs() - before; //after = rdtsc(); //tpg.duplicate_removal_ticks += MAX(after - before, 0); TIME_COUNTER_STOP(tpg.duplicate_removal_tc); } /* * Do a final pass for given read. */ static bool readpair_pass2(struct read_entry * re1, struct read_entry * re2, struct read_hit_pair * hits_pass1, int n_hits_pass1, struct read_hit_pair * hits_pass2, int * n_hits_pass2, struct pairing_options * options, struct pass2_options * options1, struct pass2_options * options2) { //llint before = rdtsc(), after; TIME_COUNTER_START(tpg.pass2_tc); int i, j, cnt; /* compute full alignment scores */ for (i = 0; i < n_hits_pass1; i++) { for (j = 0; j < 2; j++) { struct read_hit * rh = hits_pass1[i].rh[j]; struct read_entry * re = (j == 0? re1 : re2); double thres = (j == 0? 
options1->threshold : options2->threshold); if (rh->score_full < 0 || rh->sfrp == NULL) { hit_run_full_sw(re, rh, (int)abs_or_pct(thres, rh->score_max)); if (compute_mapping_qualities && rh->score_full > 0) { hit_run_post_sw(re, rh); } } } //hitpair_run_post_sw(re1, re2, hits_pass1[i].rh[0], hits_pass1[i].rh[1]); if (hits_pass1[i].rh[0]->score_full == 0 || hits_pass1[i].rh[1]->score_full == 0) { continue; } if (hits_pass1[i].rh[0]->score_full + hits_pass1[i].rh[1]->score_full >= (int)abs_or_pct(options->pass2_threshold, hits_pass1[i].score_max)) { readpair_compute_paired_hit(hits_pass1[i].rh[0], hits_pass1[i].rh[1], IS_ABSOLUTE(options->pass2_threshold), &hits_pass2[*n_hits_pass2]); (*n_hits_pass2)++; } } #ifdef DEBUG_HIT_LIST_PASS2 fprintf(stderr, "Dumping paired hits after pass2 (before duplicates removal and sorting) for reads:[%s,%s]\n", re1->name, re2->name); for (i = 0; i < n_hits_pass1; i++) { dump_hit(hits_pass1[i].rh[0]); dump_hit(hits_pass1[i].rh[1]); } #endif // remove duplicates readpair_remove_duplicate_hits(hits_pass2, n_hits_pass2, IS_ABSOLUTE(options->pass2_threshold)); // sort by score qsort(hits_pass2, *n_hits_pass2, sizeof(hits_pass2[0]), pass2_read_hit_pair_score_cmp); // trim excess mappings if (*n_hits_pass2 > options->pass2_num_outputs) *n_hits_pass2 = options->pass2_num_outputs; // if strata is set, keep only top scoring hits if (options->strata && *n_hits_pass2 > 0) { for (i = 1; i < *n_hits_pass2 && hits_pass2[0].score == hits_pass2[i].score; i++); *n_hits_pass2 = i; } // drop pairs with too many mappings if (*n_hits_pass2 > 0) { if (max_alignments == 0 || *n_hits_pass2 <= max_alignments) { #pragma omp atomic total_pairs_matched++; } else { #pragma omp atomic total_pairs_dropped++; *n_hits_pass2 = 0; } } // mark remaining hits as saved for (i = 0; i < *n_hits_pass2; i++) { hits_pass2[i].rh[0]->saved = 1; hits_pass2[i].rh[0]->sfrp->in_use = true; hits_pass2[i].rh[1]->saved = 1; hits_pass2[i].rh[1]->sfrp->in_use = true; } // update counts 
re1->final_matches += *n_hits_pass2; #pragma omp atomic total_paired_matches += re1->final_matches; // check stop condition if (options->stop_count == 0) return true; for (i = 0, cnt = 0; i < *n_hits_pass2; i++) { if (hits_pass2[i].score >= (int)abs_or_pct(options->stop_threshold, hits_pass2[i].score_max)) { cnt++; } } //after = rdtsc(); //tpg.pass2_ticks += MAX(after - before, 0); TIME_COUNTER_STOP(tpg.pass2_tc); return cnt >= options->stop_count; } static void readpair_compute_mp_ranges(struct read_entry * re1, struct read_entry * re2, struct pairing_options * options) { switch (pair_mode) { case PAIR_OPP_IN: re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len; re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len; re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len); re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[1]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[0]; break; case PAIR_OPP_OUT: /* re1->delta_g_off_min[0] = - options->max_insert_size - re2->window_len; re1->delta_g_off_max[0] = - options->min_insert_size + (re1->window_len - re1->read_len) - re2->read_len; re1->delta_g_off_min[1] = options->min_insert_size + re1->read_len + (re2->read_len - re2->window_len); re1->delta_g_off_max[1] = options->max_insert_size + re1->window_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[1]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[0]; */ re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len; re1->delta_g_off_min[0] += re1->read_len + re2->read_len; re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - 
re2->read_len; re1->delta_g_off_max[0] += re1->read_len + re2->read_len; re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len); re1->delta_g_off_min[1] -= re1->read_len + re2->read_len; re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len; re1->delta_g_off_max[1] -= re1->read_len + re2->read_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[1]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[0]; break; case PAIR_COL_FW: /* re1->delta_g_off_min[0] = options->min_insert_size + (re2->read_len - re2->window_len); re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len); re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len - re2->window_len; re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len - re2->read_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[0]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[1]; */ re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len; re1->delta_g_off_min[0] += re2->read_len; re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len; re1->delta_g_off_max[0] += re2->read_len; re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len); re1->delta_g_off_min[1] -= re2->read_len; re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len; re1->delta_g_off_max[1] -= re2->read_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[0]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[1]; break; case PAIR_COL_BW: /* re1->delta_g_off_min[0] = - options->max_insert_size + 
(re2->read_len - re2->window_len); re1->delta_g_off_max[0] = - options->min_insert_size + (re1->window_len - re1->read_len); re1->delta_g_off_min[1] = options->min_insert_size + re1->read_len - re2->window_len; re1->delta_g_off_max[1] = options->max_insert_size + re1->window_len - re2->read_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[0]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[1]; */ re1->delta_g_off_min[0] = options->min_insert_size - re2->window_len; re1->delta_g_off_min[0] += re1->read_len; re1->delta_g_off_max[0] = options->max_insert_size + (re1->window_len - re1->read_len) - re2->read_len; re1->delta_g_off_max[0] += re1->read_len; re1->delta_g_off_min[1] = - options->max_insert_size + re1->read_len + (re2->read_len - re2->window_len); re1->delta_g_off_min[1] -= re1->read_len; re1->delta_g_off_max[1] = - options->min_insert_size + re1->window_len; re1->delta_g_off_max[1] -= re1->read_len; re2->delta_g_off_min[0] = - re1->delta_g_off_max[0]; re2->delta_g_off_max[0] = - re1->delta_g_off_min[0]; re2->delta_g_off_min[1] = - re1->delta_g_off_max[1]; re2->delta_g_off_max[1] = - re1->delta_g_off_min[1]; break; default: assert(0); break; } re1->delta_region_min[0] = re1->delta_g_off_min[0] >= 0? re1->delta_g_off_min[0]/(1 << region_bits) : - 1 - (- re1->delta_g_off_min[0] - 1)/(1 << region_bits); re1->delta_region_max[0] = re1->delta_g_off_max[0] > 0? 1 + (re1->delta_g_off_max[0] - 1)/(1 << region_bits) : - (- re1->delta_g_off_max[0]/(1 << region_bits)); re1->delta_region_min[1] = re1->delta_g_off_min[1] >= 0? re1->delta_g_off_min[1]/(1 << region_bits) : - 1 - (- re1->delta_g_off_min[1] - 1)/(1 << region_bits); re1->delta_region_max[1] = re1->delta_g_off_max[1] > 0? 1 + (re1->delta_g_off_max[1] - 1)/(1 << region_bits) : - (- re1->delta_g_off_max[1]/(1 << region_bits)); re2->delta_region_min[0] = re2->delta_g_off_min[0] >= 0? 
re2->delta_g_off_min[0]/(1 << region_bits) : - 1 - (- re2->delta_g_off_min[0] - 1)/(1 << region_bits); re2->delta_region_max[0] = re2->delta_g_off_max[0] > 0? 1 + (re2->delta_g_off_max[0] - 1)/(1 << region_bits) : - (- re2->delta_g_off_max[0]/(1 << region_bits)); re2->delta_region_min[1] = re2->delta_g_off_min[1] >= 0? re2->delta_g_off_min[1]/(1 << region_bits) : - 1 - (- re2->delta_g_off_min[1] - 1)/(1 << region_bits); re2->delta_region_max[1] = re2->delta_g_off_max[1] > 0? 1 + (re2->delta_g_off_max[1] - 1)/(1 << region_bits) : - (- re2->delta_g_off_max[1]/(1 << region_bits)); #ifdef DEBUG_DUMP_MP_RANGES fprintf(stderr, "mp_ranges read[%s]: goff_min[0]:%d goff_max[0]:%d goff_min[1]:%d goff_max[1]:%d reg_min[0]:%d reg_max[0]:%d reg_min[1]:%d reg_max[1]:%d\n", re1->name, re1->delta_g_off_min[0], re1->delta_g_off_max[0], re1->delta_g_off_min[1], re1->delta_g_off_max[1], re1->delta_region_min[0], re1->delta_region_max[0], re1->delta_region_min[1], re1->delta_region_max[1]); fprintf(stderr, "mp_ranges read[%s]: goff_min[0]:%d goff_max[0]:%d goff_min[1]:%d goff_max[1]:%d reg_min[0]:%d reg_max[0]:%d reg_min[1]:%d reg_max[1]:%d\n", re2->name, re2->delta_g_off_min[0], re2->delta_g_off_max[0], re2->delta_g_off_min[1], re2->delta_g_off_max[1], re2->delta_region_min[0], re2->delta_region_max[0], re2->delta_region_min[1], re2->delta_region_max[1]); #endif } // move paired hit structs to area that persists until the output static void readpair_save_final_hits(pair_entry * pe, read_hit_pair * hits_pass2, int n_hits_pass2) { int i, nip, j; read_hit * rhp; read_hit_pair * rhpp; // first, copy array of paired hit entries pe->final_paired_hits = (struct read_hit_pair *) my_realloc(pe->final_paired_hits, (pe->n_final_paired_hits + n_hits_pass2) * sizeof(pe->final_paired_hits[0]), pe->n_final_paired_hits * sizeof(pe->final_paired_hits[0]), &mem_mapping, "final_paired_hits [%s,%s]", pe->re[0]->name, pe->re[1]->name); memcpy(&pe->final_paired_hits[pe->n_final_paired_hits], hits_pass2, 
n_hits_pass2 * sizeof(pe->final_paired_hits[0])); // next, copy read_hit entries to persistent pool for (i = 0; i < n_hits_pass2; i++) { for (nip = 0; nip < 2; nip++) { // did we already move pe->final_paired_hits[pe->n_final_paired_hits + i]? if (pe->final_paired_hits[pe->n_final_paired_hits + i].rh[nip] != NULL) { // no, need to move it now pe->final_paired_hit_pool[nip] = (read_hit *) my_realloc(pe->final_paired_hit_pool[nip], (pe->final_paired_hit_pool_size[nip] + 1) * sizeof(pe->final_paired_hit_pool[nip][0]), pe->final_paired_hit_pool_size[nip] * sizeof(pe->final_paired_hit_pool[nip][0]), &mem_mapping, "final_paired_hit_pool[%d] [%s,%s]", nip, pe->re[0]->name, pe->re[1]->name); pe->final_paired_hit_pool_size[nip]++; rhp = &pe->final_paired_hit_pool[nip][pe->final_paired_hit_pool_size[nip] - 1]; memcpy(rhp, pe->final_paired_hits[pe->n_final_paired_hits + i].rh[nip], sizeof(read_hit)); // the sfrp pointer was copied, delete old reference to prevent it from being freed too soon hits_pass2[i].rh[nip]->sfrp = NULL; // now change pointers in final_paired_hits array to new read_entry location // use indexes rather than pointers though for (j = i; j < n_hits_pass2; j++) { rhpp = &pe->final_paired_hits[pe->n_final_paired_hits + j]; if (rhpp->rh[nip] == hits_pass2[i].rh[nip]) { rhpp->rh[nip] = NULL; rhpp->rh_idx[nip] = pe->final_paired_hit_pool_size[nip] - 1; rhp->paired_hit_idx = (int *) my_realloc(rhp->paired_hit_idx, (rhp->n_paired_hit_idx + 1) * sizeof(rhp->paired_hit_idx[0]), rhp->n_paired_hit_idx * sizeof(rhp->paired_hit_idx[0]), &mem_mapping, "paired_hits [%s]", pe->re[nip]->name); rhp->n_paired_hit_idx++; rhp->paired_hit_idx[rhp->n_paired_hit_idx - 1] = pe->n_final_paired_hits + j; } } } } } pe->n_final_paired_hits += n_hits_pass2; } void handle_readpair(pair_entry * pe, struct readpair_mapping_options_t * options, int n_options) { read_entry * re1 = pe->re[0]; read_entry * re2 = pe->re[1]; bool done; int option_index = 0; int i; struct read_hit_pair * 
hits_pass1 = NULL; struct read_hit_pair * hits_pass2 = NULL; int n_hits_pass1; int n_hits_pass2; llint before = gettimeinusecs(); read_get_mapidxs(re1); read_get_mapidxs(re2); do { readpair_compute_mp_ranges(re1, re2, &options[option_index].pairing); if (options[option_index].read[0].regions.recompute || options[option_index].read[1].regions.recompute) { region_map_id++; region_map_id &= ((1 << region_map_id_bits) - 1); read_get_region_counts(re1, 0, &options[option_index].read[0].regions); read_get_region_counts(re1, 1, &options[option_index].read[0].regions); read_get_region_counts(re2, 0, &options[option_index].read[1].regions); read_get_region_counts(re2, 1, &options[option_index].read[1].regions); if (options[option_index].read[0].anchor_list.use_mp_region_counts) { read_get_mp_region_counts(re1, 0); read_get_mp_region_counts(re1, 1); } if (options[option_index].read[1].anchor_list.use_mp_region_counts) { read_get_mp_region_counts(re2, 0); read_get_mp_region_counts(re2, 1); } } if (options[option_index].read[0].anchor_list.recompute) { read_free_anchor_list(re1, &mem_mapping); read_get_anchor_list(re1, &options[option_index].read[0].anchor_list); } if (options[option_index].read[1].anchor_list.recompute) { read_free_anchor_list(re2, &mem_mapping); read_get_anchor_list(re2, &options[option_index].read[1].anchor_list); } if (options[option_index].read[0].hit_list.recompute) { read_free_hit_list(re1, &mem_mapping); read_get_hit_list(re1, &options[option_index].read[0].hit_list); } if (options[option_index].read[1].hit_list.recompute) { read_free_hit_list(re2, &mem_mapping); read_get_hit_list(re2, &options[option_index].read[1].hit_list); } readpair_pair_up_hits(re1, re2); if (options[option_index].read[0].pass1.recompute) { read_pass1(re1, &options[option_index].read[0].pass1); } if (options[option_index].read[1].pass1.recompute) { read_pass1(re2, &options[option_index].read[1].pass1); } hits_pass1 = (struct read_hit_pair *) 
my_malloc(options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass1[0]), &mem_mapping, "hits_pass1 [%s,%s]", re1->name, re2->name); n_hits_pass1 = 0; readpair_get_vector_hits(re1, re2, hits_pass1, &n_hits_pass1, &options[option_index].pairing); hits_pass2 = (struct read_hit_pair *) my_malloc(options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass2[0]), &mem_mapping, "hits_pass2 [%s,%s]", re1->name, re2->name); n_hits_pass2 = 0; done = readpair_pass2(re1, re2, hits_pass1, n_hits_pass1, hits_pass2, &n_hits_pass2, &options[option_index].pairing, &options[option_index].read[0].pass2, &options[option_index].read[1].pass2); if (n_hits_pass2 > 0) { if (options[option_index].pairing.save_outputs) readpair_save_final_hits(pe, hits_pass2, n_hits_pass2); else { readpair_output_no_mqv(pe, hits_pass2, n_hits_pass2); for (i = 0; i < n_hits_pass2; i++) { hits_pass2[i].rh[0]->sfrp->in_use = false; hits_pass2[i].rh[1]->sfrp->in_use = false; } } pe->mapped = true; } for (i = 0; i < n_hits_pass1; i++) { if (hits_pass1[i].rh[0]->sfrp != NULL && !hits_pass1[i].rh[0]->sfrp->in_use) free_sfrp(&hits_pass1[i].rh[0]->sfrp, re1, &mem_mapping); if (hits_pass1[i].rh[1]->sfrp != NULL && !hits_pass1[i].rh[1]->sfrp->in_use) free_sfrp(&hits_pass1[i].rh[1]->sfrp, re2, &mem_mapping); } my_free(hits_pass1, options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass1[0]), &mem_mapping, "hits_pass1 [%s,%s]", re1->name, re2->name); my_free(hits_pass2, options[option_index].pairing.pass1_num_outputs * sizeof(hits_pass2[0]), &mem_mapping, "hits_pass2 [%s,%s]", re1->name, re2->name); } while (!done && ++option_index < n_options); tpg.read_handle_usecs += gettimeinusecs() - before; if (option_index >= n_options && half_paired) { // this read pair fell through all the option sets; try unpaired mapping handle_read(re1, unpaired_mapping_options[0], n_unpaired_mapping_options[0]); handle_read(re2, unpaired_mapping_options[1], n_unpaired_mapping_options[1]); } // OUTPUT 
readpair_output(pe); if (aligned_reads_file != NULL && (pe->mapped || re1->mapped || re2->mapped)) { #pragma omp critical (aligned_reads_file) { fasta_write_read(aligned_reads_file, re1); fasta_write_read(aligned_reads_file, re2); } } if ((unaligned_reads_file != NULL || sam_unaligned) && !(pe->mapped || re1->mapped || re2->mapped)) { #pragma omp critical (unaligned_reads_file) { if (unaligned_reads_file != NULL) { fasta_write_read(unaligned_reads_file, re1); fasta_write_read(unaligned_reads_file, re2); } } if (sam_unaligned) { hit_output(re1, NULL, NULL, true, NULL, 0); hit_output(re2, NULL, NULL, false, NULL, 0); } } }
GB_binop__minus_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): auto-generated kernel instantiation for the MINUS operator on
// uint8.  Only comments may be added here; functional changes belong in the
// Generator/ templates.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__minus_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_uint8)
// A*D function (colscale):         GB (_AxD__minus_uint8)
// D*A function (rowscale):         GB (_DxB__minus_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_uint8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_uint8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_uint8)
// C=scalar+B                       GB (_bind1st__minus_uint8)
// C=scalar+B'                      GB (_bind1st_tran__minus_uint8)
// C=A+scalar                       GB (_bind2nd__minus_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__minus_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* flags come from GB_control.h, to shrink compiled code size)
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_UINT8 || GxB_NO_MINUS_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; harmless
    // generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are released by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__minus_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for MINUS here, so only the #else branch compiles
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__minus_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x - aij) ;                       \
}

// NOTE(review): this chunk ends mid-function; the body of _bind1st_tran
// continues past the end of this view and is preserved as-is.
GrB_Info GB (_bind1st_tran__minus_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2020 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/MacroDisable.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. // \param op The (compound) assignment operation. 
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2      // Transpose flag of the right-hand side dense vector
        , typename OP > // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_t<VT1>;
   using ET2 = ElementType_t<VT2>;

   // Aligned (SIMD) subvector kernels are only eligible if both vector types are
   // vectorizable and their element types can be combined into one SIMD packet.
   constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size );

   const bool lhsAligned( (*lhs).isAligned() );
   const bool rhsAligned( (*rhs).isAligned() );

   const int threads ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up by one element when the vector size
   // is not evenly divisible by the number of threads.
   const size_t addon ( ( ( (*lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare ( (*lhs).size() / threads + addon );
   // 'rest' is the remainder of equalShare modulo SIMDSIZE (SIMDSIZE is a power of
   // two, hence the bitmask). When SIMD is enabled, each chunk is rounded up to the
   // next multiple of SIMDSIZE so that every chunk starts on a SIMD boundary.
   const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Rounding up the chunk size can leave trailing threads without work.
      if( index >= (*lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (*lhs).size() - index ) );

      // Dispatch to the aligned/unaligned subvector combination that matches the
      // runtime alignment of the two operands.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto       target( subvector<aligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto       target( subvector<aligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto       target( subvector<unaligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
      else {
         auto       target( subvector<unaligned>( *lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( *rhs, index, size, unchecked ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense
//        vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly!
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 // Transpose flag of the right-hand side sparse vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (*lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (*lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (*lhs).size() ) continue; const size_t size( min( sizePerThread, (*lhs).size() - index ) ); auto target( subvector<unaligned>( *lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( *rhs, index, size, unchecked ) ); op( target, source ); } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. 
// \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); assign( *lhs, *rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. 
Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) { assign( *lhs, *rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ assign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); addAssign( *lhs, *rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) { addAssign( *lhs, *rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case both operands are SMP-assignable and the element // types of both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); subAssign( *lhs, *rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. 
Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) { subAssign( *lhs, *rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); multAssign( *lhs, *rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. 
// \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) { multAssign( *lhs, *rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ multAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); divAssign( *lhs, *rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. 
// \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (*lhs).size() == (*rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(*rhs).canSMPAssign() ) { divAssign( *lhs, *rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( *lhs, *rhs, []( auto& a, const auto& b ){ divAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */
namespace {

// Compile-time guard: this translation unit contains the OpenMP-based SMP kernels and
// must only be compiled when Blaze's OpenMP parallelization mode is enabled.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );

}
/*! \endcond */
//*************************************************************************************************

} // namespace blaze

#endif
GB_unop__isnan_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__isnan_bool_fp32
// op(A') function: GB_unop_tran__isnan_bool_fp32

// C type: bool
// A type: float
// cast: float cij = (aij)
// unaryop: cij = isnan (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isnan (x) ;

// casting
// Note: the "cast" keeps the value as float; isnan is applied to the float
// and only the resulting bool is stored in C.
#define GB_CAST(z, aij) \
    float z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (aij) ; \
    Cx [pC] = isnan (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__isnan_bool_fp32
(
    bool *Cx,                       // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isnan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__isnan_bool_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by this include, driven by the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blur_float.h
// Copyright (C) 2017 Basile Fraboni
// Copyright (C) 2014 Ivan Kutskir
// All Rights Reserved
// You may use, distribute and modify this code under the
// terms of the MIT license. For further details please refer
// to : https://mit-license.org/
//
//!
//! \file blur.cpp
//! \author Basile Fraboni
//! \date 2017
//!
//! \brief The software is a C++ implementation of a fast
//! Gaussian blur algorithm by Ivan Kutskir. For further details
//! please refer to :
//! http://blog.ivank.net/fastest-gaussian-blur.html
//!
//! Floating point version
//!
/*
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
*/
#include <iostream>
#include <cmath>
#include <chrono>

//!
//! \fn void std_to_box(int boxes[], float sigma, int n)
//!
//! \brief this function converts the standard deviation of
//! Gaussian blur into dimensions of boxes for box blur. For
//! further details please refer to :
//! https://www.peterkovesi.com/matlabfns/#integral
//! https://www.peterkovesi.com/papers/FastGaussianSmoothing.pdf
//!
//! \param[out] boxes boxes dimensions
//! \param[in] sigma Gaussian standard deviation
//! \param[in] n number of boxes
//!
void std_to_box(int boxes[], float sigma, int n)
{
    // Ideal averaging-filter width that would reproduce the requested sigma
    // after n successive box-blur passes.
    const float w_ideal = std::sqrt((12*sigma*sigma/n)+1);
    int w_low = std::floor(w_ideal);
    if(w_low % 2 == 0) w_low--;          // box widths must be odd
    const int w_high = w_low + 2;

    // Ideal number of passes that should use the smaller width.
    const float m_ideal = (12*sigma*sigma - n*w_low*w_low - 4*n*w_low - 3*n)/(-4*w_low - 4);
    const int m = std::round(m_ideal);

    // Store box radii rather than widths: radius = (width - 1) / 2.
    for(int i = 0; i < n; i++)
        boxes[i] = ((i < m ? w_low : w_high) - 1) / 2;
}

//!
//! \fn void horizontal_blur(float * in, float * out, int w, int h, int r)
//!
//! \brief this function performs the horizontal blur pass for box blur.
//!
//! \param[in,out] in source channel
//! \param[in,out] out target channel
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] r box dimension
//!
void horizontal_blur(float *in, float *out, int w, int h, int r)
{
    // Sliding-window box average along each row: one add and one subtract
    // per output pixel. Edge pixels are replicated outside the image.
    const float inv = 1.f / (r+r+1);   // window holds 2r+1 samples
#pragma omp parallel for
    for(int y = 0; y < h; y++)
    {
        int dst  = y*w;        // write cursor
        int tail = dst;        // sample about to leave the window
        int head = dst + r;    // sample about to enter the window
        const float first = in[dst];
        const float last  = in[dst + w - 1];
        float acc = (r+1) * first;
        for(int k = 0; k < r; k++) acc += in[dst + k];
        for(int k = 0;   k <= r;  k++) { acc += in[head++] - first;      out[dst++] = acc*inv; }
        for(int k = r+1; k < w-r; k++) { acc += in[head++] - in[tail++]; out[dst++] = acc*inv; }
        for(int k = w-r; k < w;   k++) { acc += last - in[tail++];       out[dst++] = acc*inv; }
    }
}

//!
//! \fn void total_blur(float * in, float * out, int w, int h, int r)
//!
//! \brief this function performs the total blur pass for box blur.
//!
//! \param[in,out] in source channel
//! \param[in,out] out target channel
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] r box dimension
//!
void total_blur(float * in, float * out, int w, int h, int r)
{
    // Same sliding-window scheme as horizontal_blur, but walking down each
    // column (stride w between consecutive samples).
    const float inv = 1.f / (r+r+1);
#pragma omp parallel for
    for(int x = 0; x < w; x++)
    {
        int dst  = x;
        int tail = dst;
        int head = dst + r*w;
        const float first = in[dst];
        const float last  = in[dst + w*(h-1)];
        float acc = (r+1) * first;
        for(int k = 0; k < r; k++) acc += in[dst + k*w];
        for(int k = 0;   k <= r;  k++) { acc += in[head] - first;    out[dst] = acc*inv; head += w; dst += w; }
        for(int k = r+1; k < h-r; k++) { acc += in[head] - in[tail]; out[dst] = acc*inv; tail += w; head += w; dst += w; }
        for(int k = h-r; k < h;   k++) { acc += last - in[tail];     out[dst] = acc*inv; tail += w; dst += w; }
    }
}

//!
//! \fn void box_blur(float * in, float * out, int w, int h, int r)
//!
//! \brief this function performs a box blur pass.
//!
//! \param[in,out] in source channel
//! \param[in,out] out target channel
//! \param[in] w image width
//! \param[in] h image height
//! \param[in] r box dimension
//!
void box_blur(float *& in, float *& out, int w, int h, int r)
{
    // Exchange the two buffers, then run one horizontal and one vertical pass;
    // on return the blurred result is referenced by `out` (the caller's
    // pointers are swapped as a side effect).
    // An anisotropic blur could use distinct radii rx/ry in the two passes.
    std::swap(in, out);
    horizontal_blur(out, in, w, h, r);
    total_blur(in, out, w, h, r);
}

//!
//!
\fn void fast_gaussian_blur(float * in, float * out, int w, int h, float sigma) //! //! \brief this function performs a fast Gaussian blur. Applying several //! times box blur tends towards a true Gaussian blur. Three passes are sufficient //! for good results. For further details please refer to : //! http://blog.ivank.net/fastest-gaussian-blur.html //! //! \param[in,out] in source channel //! \param[in,out] out target channel //! \param[in] w image width //! \param[in] h image height //! \param[in] r box dimension //! void fast_gaussian_blur(float * in, float * out, int w, int h, float sigma) { // sigma conversion to box dimensions int boxes[3]; std_to_box(boxes, sigma, 3); box_blur(in, out, w, h, boxes[0]); box_blur(out, in, w, h, boxes[1]); box_blur(in, out, w, h, boxes[2]); } /* //! \code{.cpp} int main(int argc, char * argv[]) { if( argc < 2 ) exit(1); const char * image_file = argv[1]; const float sigma = argc > 2 ? std::atof(argv[2]) : 3.; const char * output_file = argc > 3 ? argv[3] : "blur.png"; // Image loading int width, height, channels; unsigned char * image_data = stbi_load(argv[1], &width, &height, &channels, 0); std::cout << "Source image: " << width<<"x" << height << " ("<<channels<<")" << std::endl; if(channels < 3) { std::cout<< "Input images must be RGB images."<<std::endl; exit(1); } // copy data int size = width * height; // output channels r,g,b float * newb = new float[size]; float * newg = new float[size]; float * newr = new float[size]; // input channels r,g,b float * oldb = new float[size]; float * oldg = new float[size]; float * oldr = new float[size]; // channels copy r,g,b for(int i = 0; i < size; ++i) { oldb[i] = image_data[channels * i + 0] / 255.f; oldg[i] = image_data[channels * i + 1] / 255.f; oldr[i] = image_data[channels * i + 2] / 255.f; } // per channel filter auto start = std::chrono::system_clock::now(); fast_gaussian_blur(oldb, newb, width, height, sigma); fast_gaussian_blur(oldg, newg, width, height, sigma); 
fast_gaussian_blur(oldr, newr, width, height, sigma); auto end = std::chrono::system_clock::now(); // stats float elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count(); std::cout << "time " << elapsed << "ms" << std::endl; // channels copy r,g,b for(int i = 0; i < size; ++i) { image_data[channels * i + 0] = (unsigned char) std::min(255.f, std::max(0.f, 255.f * newb[i])); image_data[channels * i + 1] = (unsigned char) std::min(255.f, std::max(0.f, 255.f * newg[i])); image_data[channels * i + 2] = (unsigned char) std::min(255.f, std::max(0.f, 255.f * newr[i])); } // save std::string file(output_file); std::string ext = file.substr(file.size()-3); if( ext == "bmp" ) stbi_write_bmp(output_file, width, height, channels, image_data); else if( ext == "jpg" ) stbi_write_jpg(output_file, width, height, channels, image_data, 90); else { if( ext != "png" ) { std::cout << "format '" << ext << "' not supported writing default .png" << std::endl; file = file.substr(0, file.size()-4) + std::string(".png"); } stbi_write_png(file.c_str(), width, height, channels, image_data, channels*width); } stbi_image_free(image_data); // clean memory delete[] newr; delete[] newb; delete[] newg; delete[] oldr; delete[] oldb; delete[] oldg; image_data = stbi_load(argv[1], &width, &height, &channels, 0); return 0; } //! \endcode */
bitshuffle_core.c
/*
 * Bitshuffle - Filter for improving compression of typed binary data.
 *
 * Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
 * Website: http://www.github.com/kiyo-masui/bitshuffle
 * Created: 2014
 *
 * See LICENSE file for details about copyright and rights to use.
 *
 */

#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"

#include <stdio.h>
#include <string.h>

#if defined(__AVX2__) && defined (__SSE2__)
#define USEAVX2
#endif

#if defined(__SSE2__)
#define USESSE2
#endif

/* Note: an undefined __ARM_NEON evaluates to 0 in the preprocessor, so the
 * bare (__ARM_NEON) test below is safe. */
#if defined(__ARM_NEON__) || (__ARM_NEON)
#define USEARMNEON
#endif

// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#elif defined USEARMNEON
#include <arm_neon.h>
#endif

// MSVC's OpenMP requires signed loop counters, hence the signed typedef there.
#if defined(_OPENMP) && defined(_MSC_VER)
typedef int64_t omp_size_t;
#else
typedef size_t omp_size_t;
#endif

// Macros.
// Bit-level transposition only works on sizes that are multiples of 8;
// -80 is the library's "size not a multiple of 8" error code.
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))


/* ---- Functions indicating compile time instruction set. ---- */

/* Returns 1 iff the library was compiled with Arm NEON support. */
int bshuf_using_NEON(void) {
#ifdef USEARMNEON
    return 1;
#else
    return 0;
#endif
}

/* Returns 1 iff the library was compiled with SSE2 support. */
int bshuf_using_SSE2(void) {
#ifdef USESSE2
    return 1;
#else
    return 0;
#endif
}

/* Returns 1 iff the library was compiled with AVX2 support. */
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
    return 1;
#else
    return 0;
#endif
}


/* ---- Worker code not requiring special instruction sets. ----
 *
 * The following code does not use any x86 specific vectorized instructions
 * and should compile on any machine
 *
 */

/* Transpose 8x8 bit array packed into a single quadword *x*.
 * *t* is workspace.
 */
#define TRANS_BIT_8X8(x, t) { \
        t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
        x = x ^ t ^ (t << 7); \
        t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
        x = x ^ t ^ (t << 14); \
        t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
        x = x ^ t ^ (t << 28); \
    }

/* Transpose 8x8 bit array along the diagonal from upper right
to lower left */
#define TRANS_BIT_8X8_BE(x, t) { \
        t = (x ^ (x >> 9)) & 0x0055005500550055LL; \
        x = x ^ t ^ (t << 9); \
        t = (x ^ (x >> 18)) & 0x0000333300003333LL; \
        x = x ^ t ^ (t << 18); \
        t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL; \
        x = x ^ t ^ (t << 36); \
    }

/* Transpose of an array of arbitrarily typed elements. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
        size_t ii, jj, kk; \
        const type_t* in_type = (const type_t*) in; \
        type_t* out_type = (type_t*) out; \
        for(ii = 0; ii + 7 < lda; ii += 8) { \
            for(jj = 0; jj < ldb; jj++) { \
                for(kk = 0; kk < 8; kk++) { \
                    out_type[jj*lda + ii + kk] = \
                            in_type[ii*ldb + kk * ldb + jj]; \
                } \
            } \
        } \
        /* Scalar tail for the final (lda % 8) rows. */ \
        for(ii = lda - lda % 8; ii < lda; ii ++) { \
            for(jj = 0; jj < ldb; jj++) { \
                out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
            } \
        } \
    }


/* Memory copy with bshuf call signature. For testing and profiling. */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    memcpy(out_b, in_b, size * elem_size);
    return size * elem_size;
}


/* Transpose bytes within elements, starting partway through input.
 * *start* is the element index at which to begin (must be a multiple of 8);
 * used by the SIMD kernels to finish off their non-vectorizable tail. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
         const size_t elem_size, const size_t start) {

    size_t ii, jj, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(start);

    if (size > start) {
        // ii loop separated into 2 loops so the compiler can unroll
        // the inner one.
        for (ii = start; ii + 7 < size; ii += 8) {
            for (jj = 0; jj < elem_size; jj++) {
                for (kk = 0; kk < 8; kk++) {
                    out_b[jj * size + ii + kk]
                        = in_b[ii * elem_size + kk * elem_size + jj];
                }
            }
        }
        for (ii = size - size % 8; ii < size; ii ++) {
            for (jj = 0; jj < elem_size; jj++) {
                out_b[jj * size + ii] = in_b[ii * elem_size + jj];
            }
        }
    }
    return size * elem_size;
}


/* Transpose bytes within elements. */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}


/* Transpose bits within bytes.
 * *start_byte* is the byte offset at which to begin (multiple of 8). */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
         const size_t elem_size, const size_t start_byte) {

    const uint64_t* in_b = (const uint64_t*) in;
    uint8_t* out_b = (uint8_t*) out;

    uint64_t x, t;

    size_t ii, kk;
    size_t nbyte = elem_size * size;
    size_t nbyte_bitrow = nbyte / 8;

    // Runtime endianness detection.
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    // On big-endian targets the bit rows are written in reverse order; the
    // negative value deliberately wraps in size_t arithmetic (index offsets
    // cancel out via bit_row_offset).
    const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
    const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;

    CHECK_MULT_EIGHT(nbyte);
    CHECK_MULT_EIGHT(start_byte);

    for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
        x = in_b[ii];
        if (little_endian) {
            TRANS_BIT_8X8(x, t);
        } else {
            TRANS_BIT_8X8_BE(x, t);
        }
        // Scatter the 8 transposed bytes, one per bit row.
        for (kk = 0; kk < 8; kk ++) {
            out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
            x = x >> 8;
        }
    }
    return size * elem_size;
}


/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}


/* General transpose of an array, optimized for large element sizes.
 */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
        const size_t ldb, const size_t elem_size) {

    size_t ii, jj;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    // memcpy per element: efficient when elem_size is large.
    for(ii = 0; ii < lda; ii++) {
        for(jj = 0; jj < ldb; jj++) {
            memcpy(&out_b[(jj*lda + ii) * elem_size],
                   &in_b[(ii*ldb + jj) * elem_size], elem_size);
        }
    }
    return lda * ldb * elem_size;
}


/* Transpose rows of shuffled bits (size / 8 bytes) within groups of 8. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t nbyte_bitrow = size / 8;

    CHECK_MULT_EIGHT(size);

    return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}


/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;
    void *tmp_buf;

    CHECK_MULT_EIGHT(size);

    tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    // Three-stage pipeline: byte transpose, bit transpose, bit-row regroup.
    count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    size_t ii, jj, kk, nbyte_row;
    const char *in_b;
    char *out_b;

    in_b = (const char*) in;
    out_b = (char*) out;

    nbyte_row = size / 8;

    CHECK_MULT_EIGHT(size);

    for (jj = 0; jj < elem_size; jj++) {
        for (ii = 0; ii < nbyte_row; ii++) {
            for (kk = 0; kk < 8; kk++) {
                out_b[ii * 8 * elem_size + jj * 8 + kk] = \
                        in_b[(jj * 8 + kk) * nbyte_row + ii];
            }
        }
    }
    return size * elem_size;
}


/* Shuffle bits within the bytes of eight element blocks.
 */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
        const size_t size, const size_t elem_size) {

    const char *in_b;
    char *out_b;
    uint64_t x, t;
    size_t ii, jj, kk;
    size_t nbyte, out_index;

    // Runtime endianness detection; see bshuf_trans_bit_byte_remainder for
    // the deliberate size_t wraparound in elem_skip on big-endian targets.
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    const size_t elem_skip = little_endian ? elem_size : -elem_size;
    const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;

    CHECK_MULT_EIGHT(size);

    in_b = (const char*) in;
    out_b = (char*) out;

    nbyte = elem_size * size;

    for (jj = 0; jj < 8 * elem_size; jj += 8) {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
            x = *((uint64_t*) &in_b[ii + jj]);
            if (little_endian) {
                TRANS_BIT_8X8(x, t);
            } else {
                TRANS_BIT_8X8_BE(x, t);
            }
            for (kk = 0; kk < 8; kk++) {
                out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
                *((uint8_t*) &out_b[out_index]) = x;
                x = x >> 8;
            }
        }
    }
    return size * elem_size;
}


/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;
    void *tmp_buf;

    CHECK_MULT_EIGHT(size);

    tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    // Inverse of bshuf_trans_bit_elem_scal's pipeline.
    count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* ---- Worker code that uses Arm NEON ----
 *
 * The following code makes use of the Arm NEON instruction set.
 * NEON technology is the implementation of the ARM Advanced Single
 * Instruction Multiple Data (SIMD) extension.
 * The NEON unit is the component of the processor that executes SIMD instructions.
 * It is also called the NEON Media Processing Engine (MPE).
 *
 */
#ifdef USEARMNEON

/* Transpose bytes within elements for 16 bit elements.
 */
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b = (const char*) in;
    char *out_b = (char*) out;
    int8x16_t a0, b0, a1, b1;

    // Repeated zip rounds interleave the low/high bytes of each 16-bit
    // element into separate output planes, 16 elements at a time.
    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = vld1q_s8(in_b + 2*ii + 0*16);
        b0 = vld1q_s8(in_b + 2*ii + 1*16);

        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);

        a0 = vzip1q_s8(a1, b1);
        b0 = vzip2q_s8(a1, b1);

        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);

        a0 = vzip1q_s8(a1, b1);
        b0 = vzip2q_s8(a1, b1);

        vst1q_s8(out_b + 0*size + ii, a0);
        vst1q_s8(out_b + 1*size + ii, b0);
    }
    // Scalar pass for the final (size % 16) elements.
    return bshuf_trans_byte_elem_remainder(in, out, size, 2,
            size - size % 16);
}


/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b;
    char *out_b;
    in_b = (const char*) in;
    out_b = (char*) out;
    int8x16_t a0, b0, c0, d0, a1, b1, c1, d1;
    int64x2_t a2, b2, c2, d2;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = vld1q_s8(in_b + 4*ii + 0*16);
        b0 = vld1q_s8(in_b + 4*ii + 1*16);
        c0 = vld1q_s8(in_b + 4*ii + 2*16);
        d0 = vld1q_s8(in_b + 4*ii + 3*16);

        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);
        c1 = vzip1q_s8(c0, d0);
        d1 = vzip2q_s8(c0, d0);

        a0 = vzip1q_s8(a1, b1);
        b0 = vzip2q_s8(a1, b1);
        c0 = vzip1q_s8(c1, d1);
        d0 = vzip2q_s8(c1, d1);

        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);
        c1 = vzip1q_s8(c0, d0);
        d1 = vzip2q_s8(c0, d0);

        // Final 64-bit zip gathers the four byte planes.
        a2 = vzip1q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
        b2 = vzip2q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
        c2 = vzip1q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));
        d2 = vzip2q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));

        vst1q_s64((int64_t *) (out_b + 0*size + ii), a2);
        vst1q_s64((int64_t *) (out_b + 1*size + ii), b2);
        vst1q_s64((int64_t *) (out_b + 2*size + ii), c2);
        vst1q_s64((int64_t *) (out_b + 3*size + ii), d2);
    }
    return bshuf_trans_byte_elem_remainder(in, out, size, 4,
            size - size % 16);
}


/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {

    size_t ii;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
    int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;

    for (ii=0; ii + 15 < size; ii += 16) {
        a0 = vld1q_s8(in_b + 8*ii + 0*16);
        b0 = vld1q_s8(in_b + 8*ii + 1*16);
        c0 = vld1q_s8(in_b + 8*ii + 2*16);
        d0 = vld1q_s8(in_b + 8*ii + 3*16);
        e0 = vld1q_s8(in_b + 8*ii + 4*16);
        f0 = vld1q_s8(in_b + 8*ii + 5*16);
        g0 = vld1q_s8(in_b + 8*ii + 6*16);
        h0 = vld1q_s8(in_b + 8*ii + 7*16);

        a1 = vzip1q_s8 (a0, b0);
        b1 = vzip2q_s8 (a0, b0);
        c1 = vzip1q_s8 (c0, d0);
        d1 = vzip2q_s8 (c0, d0);
        e1 = vzip1q_s8 (e0, f0);
        f1 = vzip2q_s8 (e0, f0);
        g1 = vzip1q_s8 (g0, h0);
        h1 = vzip2q_s8 (g0, h0);

        a0 = vzip1q_s8 (a1, b1);
        b0 = vzip2q_s8 (a1, b1);
        c0 = vzip1q_s8 (c1, d1);
        d0 = vzip2q_s8 (c1, d1);
        e0 = vzip1q_s8 (e1, f1);
        f0 = vzip2q_s8 (e1, f1);
        g0 = vzip1q_s8 (g1, h1);
        h0 = vzip2q_s8 (g1, h1);

        a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
        b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
        c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
        d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
        e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
        f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
        g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
        h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));

        a0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
        b0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
        c0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
        d0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
        e0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
        f0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
        g0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
        h0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));

        vst1q_s8(out_b + 0*size + ii, a0);
        vst1q_s8(out_b + 1*size + ii, b0);
        vst1q_s8(out_b + 2*size + ii, c0);
        vst1q_s8(out_b + 3*size + ii, d0);
        vst1q_s8(out_b + 4*size + ii, e0);
        vst1q_s8(out_b + 5*size + ii, f0);
        vst1q_s8(out_b + 6*size + ii, g0);
        vst1q_s8(out_b + 7*size + ii, h0);
    }
    return bshuf_trans_byte_elem_remainder(in, out, size, 8,
            size - size % 16);
}


/* Transpose bytes within elements using best NEON algorithm available. */
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    // Trivial cases: power of 2 bytes.
    switch (elem_size) {
        case 1:
            count = bshuf_copy(in, out, size, elem_size);
            return count;
        case 2:
            count = bshuf_trans_byte_elem_NEON_16(in, out, size);
            return count;
        case 4:
            count = bshuf_trans_byte_elem_NEON_32(in, out, size);
            return count;
        case 8:
            count = bshuf_trans_byte_elem_NEON_64(in, out, size);
            return count;
    }

    // Worst case: odd number of bytes. Turns out that this is faster for
    // (odd * 2) byte elements as well (hence % 4).
    if (elem_size % 4) {
        count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
        return count;
    }

    // Multiple of power of 2: transpose hierarchically.
    {
        size_t nchunk_elem;
        void* tmp_buf = malloc(size * elem_size);
        if (tmp_buf == NULL) return -1;

        if ((elem_size % 8) == 0) {
            nchunk_elem = elem_size / 8;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
            count = bshuf_trans_byte_elem_NEON_64(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
        } else if ((elem_size % 4) == 0) {
            nchunk_elem = elem_size / 4;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
            count = bshuf_trans_byte_elem_NEON_32(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
        } else {
            // Not used since scalar algorithm is faster.
            nchunk_elem = elem_size / 2;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
            count = bshuf_trans_byte_elem_NEON_16(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
        }

        free(tmp_buf);
        return count;
    }
}


/* Creates a mask made up of the most significant
 * bit of each byte of 'input'
 * (NEON has no direct equivalent of SSE2's _mm_movemask_epi8). */
int32_t move_byte_mask_neon(uint8x16_t input) {

    return (  ((input[0] & 0x80) >> 7)          | (((input[1] & 0x80) >> 7) << 1)   | (((input[2] & 0x80) >> 7) << 2)   | (((input[3] & 0x80) >> 7) << 3)
            | (((input[4] & 0x80) >> 7) << 4)   | (((input[5] & 0x80) >> 7) << 5)   | (((input[6] & 0x80) >> 7) << 6)   | (((input[7] & 0x80) >> 7) << 7)
            | (((input[8] & 0x80) >> 7) << 8)   | (((input[9] & 0x80) >> 7) << 9)   | (((input[10] & 0x80) >> 7) << 10) | (((input[11] & 0x80) >> 7) << 11)
            | (((input[12] & 0x80) >> 7) << 12) | (((input[13] & 0x80) >> 7) << 13) | (((input[14] & 0x80) >> 7) << 14) | (((input[15] & 0x80) >> 7) << 15)
           );
}

/* Transpose bits within bytes.
 */
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    uint16_t* out_ui16;

    int64_t count;

    size_t nbyte = elem_size * size;

    CHECK_MULT_EIGHT(nbyte);

    int16x8_t xmm;
    int32_t bt;

    // Peel off one bit plane per iteration: collect the MSB of 16 bytes,
    // then shift left so the next plane becomes the MSB.
    for (ii = 0; ii + 15 < nbyte; ii += 16) {
        xmm = vld1q_s16((int16_t *) (in_b + ii));
        for (kk = 0; kk < 8; kk++) {
            bt = move_byte_mask_neon((uint8x16_t) xmm);
            xmm = vshlq_n_s16(xmm, 1);
            out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_ui16 = bt;
        }
    }
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 16);
    return count;
}


/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    // Same three-stage pipeline as the scalar version, with NEON kernels.
    count = bshuf_trans_byte_elem_NEON(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_NEON(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}


/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 */
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, jj;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
    int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
    int64x1_t *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;

    // 8x16 byte block transpose via three rounds of zips, then 64-bit halves
    // are scattered to the 16 destination rows.
    for (ii = 0; ii + 7 < nrows; ii += 8) {
        for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
            a0 = vld1q_s8(in_b + (ii + 0)*nbyte_row + jj);
            b0 = vld1q_s8(in_b + (ii + 1)*nbyte_row + jj);
            c0 = vld1q_s8(in_b + (ii + 2)*nbyte_row + jj);
            d0 = vld1q_s8(in_b + (ii + 3)*nbyte_row + jj);
            e0 = vld1q_s8(in_b + (ii + 4)*nbyte_row + jj);
            f0 = vld1q_s8(in_b + (ii + 5)*nbyte_row + jj);
            g0 = vld1q_s8(in_b + (ii + 6)*nbyte_row + jj);
            h0 = vld1q_s8(in_b + (ii + 7)*nbyte_row + jj);

            a1 = vzip1q_s8(a0, b0);
            b1 = vzip1q_s8(c0, d0);
            c1 = vzip1q_s8(e0, f0);
            d1 = vzip1q_s8(g0, h0);
            e1 = vzip2q_s8(a0, b0);
            f1 = vzip2q_s8(c0, d0);
            g1 = vzip2q_s8(e0, f0);
            h1 = vzip2q_s8(g0, h0);

            a0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
            b0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
            c0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
            d0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
            e0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
            f0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
            g0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
            h0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));

            a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
            b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
            c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
            d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
            e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
            f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
            g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
            h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));

            as = (int64x1_t *) &a1;
            bs = (int64x1_t *) &b1;
            cs = (int64x1_t *) &c1;
            ds = (int64x1_t *) &d1;
            es = (int64x1_t *) &e1;
            fs = (int64x1_t *) &f1;
            gs = (int64x1_t *) &g1;
            hs = (int64x1_t *) &h1;

            vst1_s64((int64_t *)(out_b + (jj + 0) * nrows + ii), *as);
            vst1_s64((int64_t *)(out_b + (jj + 1) * nrows + ii), *(as + 1));
            vst1_s64((int64_t *)(out_b + (jj + 2) * nrows + ii), *bs);
            vst1_s64((int64_t *)(out_b + (jj + 3) * nrows + ii), *(bs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 4) * nrows + ii), *cs);
            vst1_s64((int64_t *)(out_b + (jj + 5) * nrows + ii), *(cs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 6) * nrows + ii), *ds);
            vst1_s64((int64_t *)(out_b + (jj + 7) * nrows + ii), *(ds + 1));
            vst1_s64((int64_t *)(out_b + (jj + 8) * nrows + ii), *es);
            vst1_s64((int64_t *)(out_b + (jj + 9) * nrows + ii), *(es + 1));
            vst1_s64((int64_t *)(out_b + (jj + 10) * nrows + ii), *fs);
            vst1_s64((int64_t *)(out_b + (jj + 11) * nrows + ii), *(fs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 12) * nrows + ii), *gs);
            vst1_s64((int64_t *)(out_b + (jj + 13) * nrows + ii), *(gs + 1));
            vst1_s64((int64_t *)(out_b + (jj + 14) * nrows + ii), *hs);
            vst1_s64((int64_t *)(out_b + (jj + 15) * nrows + ii), *(hs + 1));
        }
        // Scalar tail for the final (nbyte_row % 16) columns.
        for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
            out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
            out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
            out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
            out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
            out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
            out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
            out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
        }
    }
    return size * elem_size;
}


/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that such that it is
    // in_buf = out_buf safe.
    const char* in_b = (const char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    int16x8_t xmm;
    int32_t bt;

    if (elem_size % 2) {
        // Odd element sizes fall back to the scalar kernel.
        bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    } else {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                ii += 8 * elem_size) {
            for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
                xmm = vld1q_s16((int16_t *) &in_b[ii + jj]);
                for (kk = 0; kk < 8; kk++) {
                    bt = move_byte_mask_neon((uint8x16_t) xmm);
                    xmm = vshlq_n_s16(xmm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    out_ui16[ind / 2] = bt;
                }
            }
        }
    }
    return size * elem_size;
}


/* Untranspose bits within elements.
 */
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    // Inverse of bshuf_trans_bit_elem_NEON's pipeline.
    count = bshuf_trans_byte_bitrow_NEON(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_NEON(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USEARMNEON

// Stubs when NEON is unavailable; -13 is the library's "instruction set
// not supported in this build" error code.

int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -13;
}


int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -13;
}


int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -13;
}


int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -13;
}


int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -13;
}


int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {
    return -13;
}


int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {
    return -13;
}


int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {
    return -13;
}


int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -13;
}

#endif


/* ---- Worker code that uses SSE2 ----
 *
 * The following code makes use of the SSE2 instruction set and specialized
 * 16 byte registers. The SSE2 instructions are present on modern x86
 * processors. The first Intel processor microarchitecture supporting SSE2 was
 * Pentium 4 (2000).
 *
 */

#ifdef USESSE2

/* Transpose bytes within elements for 16 bit elements.
*/
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b = (const char*) in;
    char *out_b = (char*) out;
    __m128i a0, b0, a1, b1;

    for (ii=0; ii + 15 < size; ii += 16) {
        /* Load 16 two-byte elements (32 bytes). */
        a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);

        /* Four rounds of byte interleave implement the 2 x 16 transpose. */
        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
    }
    /* Scalar pass for the trailing size % 16 elements. */
    return bshuf_trans_byte_elem_remainder(in, out, size,
            2, size - size % 16);
}

/* Transpose bytes within elements for 32 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b;
    char *out_b;
    in_b = (const char*) in;
    out_b = (char*) out;
    __m128i a0, b0, c0, d0, a1, b1, c1, d1;

    for (ii=0; ii + 15 < size; ii += 16) {
        /* Load 16 four-byte elements (64 bytes). */
        a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
        c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
        d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);

        /* Three rounds of byte interleave, then a 64-bit shuffle, realize
         * the 4 x 16 byte transpose. */
        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);
        c0 = _mm_unpacklo_epi8(c1, d1);
        d0 = _mm_unpackhi_epi8(c1, d1);

        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);

        a0 = _mm_unpacklo_epi64(a1, c1);
        b0 = _mm_unpackhi_epi64(a1, c1);
        c0 = _mm_unpacklo_epi64(b1, d1);
        d0 = _mm_unpackhi_epi64(b1, d1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
        _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
        _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
    }
    return bshuf_trans_byte_elem_remainder(in, out, size,
            4, size - size % 16);
}

/* Transpose bytes within elements for 64 bit elements. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {

    size_t ii;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    __m128i a0, b0, c0, d0, e0, f0, g0, h0;
    __m128i a1, b1, c1, d1, e1, f1, g1, h1;

    for (ii=0; ii + 15 < size; ii += 16) {
        /* Load 16 eight-byte elements (128 bytes). */
        a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
        b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
        c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
        d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
        e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
        f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
        g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
        h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);

        /* 8 x 16 byte transpose: two byte rounds, a 32-bit round and a
         * 64-bit round of interleaves. */
        a1 = _mm_unpacklo_epi8(a0, b0);
        b1 = _mm_unpackhi_epi8(a0, b0);
        c1 = _mm_unpacklo_epi8(c0, d0);
        d1 = _mm_unpackhi_epi8(c0, d0);
        e1 = _mm_unpacklo_epi8(e0, f0);
        f1 = _mm_unpackhi_epi8(e0, f0);
        g1 = _mm_unpacklo_epi8(g0, h0);
        h1 = _mm_unpackhi_epi8(g0, h0);

        a0 = _mm_unpacklo_epi8(a1, b1);
        b0 = _mm_unpackhi_epi8(a1, b1);
        c0 = _mm_unpacklo_epi8(c1, d1);
        d0 = _mm_unpackhi_epi8(c1, d1);
        e0 = _mm_unpacklo_epi8(e1, f1);
        f0 = _mm_unpackhi_epi8(e1, f1);
        g0 = _mm_unpacklo_epi8(g1, h1);
        h0 = _mm_unpackhi_epi8(g1, h1);

        a1 = _mm_unpacklo_epi32(a0, c0);
        b1 = _mm_unpackhi_epi32(a0, c0);
        c1 = _mm_unpacklo_epi32(b0, d0);
        d1 = _mm_unpackhi_epi32(b0, d0);
        e1 = _mm_unpacklo_epi32(e0, g0);
        f1 = _mm_unpackhi_epi32(e0, g0);
        g1 = _mm_unpacklo_epi32(f0, h0);
        h1 = _mm_unpackhi_epi32(f0, h0);

        a0 = _mm_unpacklo_epi64(a1, e1);
        b0 = _mm_unpackhi_epi64(a1, e1);
        c0 = _mm_unpacklo_epi64(b1, f1);
        d0 = _mm_unpackhi_epi64(b1, f1);
        e0 = _mm_unpacklo_epi64(c1, g1);
        f0 = _mm_unpackhi_epi64(c1, g1);
        g0 = _mm_unpacklo_epi64(d1, h1);
        h0 = _mm_unpackhi_epi64(d1, h1);

        _mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
        _mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
        _mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
        _mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
        _mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
        _mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
        _mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
        _mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
    }
    return bshuf_trans_byte_elem_remainder(in, out, size,
            8, size - size % 16);
}

/* Transpose bytes within elements using best SSE algorithm available. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    // Trivial cases: power of 2 bytes.
    switch (elem_size) {
        case 1:
            count = bshuf_copy(in, out, size, elem_size);
            return count;
        case 2:
            count = bshuf_trans_byte_elem_SSE_16(in, out, size);
            return count;
        case 4:
            count = bshuf_trans_byte_elem_SSE_32(in, out, size);
            return count;
        case 8:
            count = bshuf_trans_byte_elem_SSE_64(in, out, size);
            return count;
    }

    // Worst case: odd number of bytes. Turns out that this is faster for
    // (odd * 2) byte elements as well (hence % 4).
    if (elem_size % 4) {
        count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
        return count;
    }

    // Multiple of power of 2: transpose hierarchically.
    {
        size_t nchunk_elem;
        void* tmp_buf = malloc(size * elem_size);
        if (tmp_buf == NULL) return -1;

        if ((elem_size % 8) == 0) {
            nchunk_elem = elem_size / 8;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
            count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
        } else if ((elem_size % 4) == 0) {
            nchunk_elem = elem_size / 4;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
            count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
        } else {
            // Not used since scalar algorithm is faster.
            nchunk_elem = elem_size / 2;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
            count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
        }

        free(tmp_buf);
        return count;
    }
}

/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    uint16_t* out_ui16;

    int64_t count;

    size_t nbyte = elem_size * size;

    CHECK_MULT_EIGHT(nbyte);

    __m128i xmm;
    int32_t bt;

    for (ii = 0; ii + 15 < nbyte; ii += 16) {
        xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
        /* Peel one bit plane per iteration: movemask gathers the MSB of
         * each byte, then a shift exposes the next bit. */
        for (kk = 0; kk < 8; kk++) {
            bt = _mm_movemask_epi8(xmm);
            xmm = _mm_slli_epi16(xmm, 1);
            out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_ui16 = bt;
        }
    }
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 16);
    return count;
}

/* Transpose bits within elements. */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    /* byte transpose -> bit-within-byte transpose -> bit-row regroup. */
    count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
*/
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, jj;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    __m128i a0, b0, c0, d0, e0, f0, g0, h0;
    __m128i a1, b1, c1, d1, e1, f1, g1, h1;
    __m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
    for (ii = 0; ii + 7 < nrows; ii += 8) {
        for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
            /* Load a 8 x 16 byte tile, one vector per bit row. */
            a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
            b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
            c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
            d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
            e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
            f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
            g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
            h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);

            /* Transpose the tile with interleaves of growing width. */
            a1 = _mm_unpacklo_epi8(a0, b0);
            b1 = _mm_unpacklo_epi8(c0, d0);
            c1 = _mm_unpacklo_epi8(e0, f0);
            d1 = _mm_unpacklo_epi8(g0, h0);
            e1 = _mm_unpackhi_epi8(a0, b0);
            f1 = _mm_unpackhi_epi8(c0, d0);
            g1 = _mm_unpackhi_epi8(e0, f0);
            h1 = _mm_unpackhi_epi8(g0, h0);

            a0 = _mm_unpacklo_epi16(a1, b1);
            b0 = _mm_unpacklo_epi16(c1, d1);
            c0 = _mm_unpackhi_epi16(a1, b1);
            d0 = _mm_unpackhi_epi16(c1, d1);
            e0 = _mm_unpacklo_epi16(e1, f1);
            f0 = _mm_unpacklo_epi16(g1, h1);
            g0 = _mm_unpackhi_epi16(e1, f1);
            h0 = _mm_unpackhi_epi16(g1, h1);

            a1 = _mm_unpacklo_epi32(a0, b0);
            b1 = _mm_unpackhi_epi32(a0, b0);
            c1 = _mm_unpacklo_epi32(c0, d0);
            d1 = _mm_unpackhi_epi32(c0, d0);
            e1 = _mm_unpacklo_epi32(e0, f0);
            f1 = _mm_unpackhi_epi32(e0, f0);
            g1 = _mm_unpacklo_epi32(g0, h0);
            h1 = _mm_unpackhi_epi32(g0, h0);

            // We don't have a storeh instruction for integers, so interpret
            // as a float. Have a storel (_mm_storel_epi64).
            as = (__m128 *) &a1;
            bs = (__m128 *) &b1;
            cs = (__m128 *) &c1;
            ds = (__m128 *) &d1;
            es = (__m128 *) &e1;
            fs = (__m128 *) &f1;
            gs = (__m128 *) &g1;
            hs = (__m128 *) &h1;

            /* Low 64-bit halves go to the even output rows ... */
            _mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
            _mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
            _mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
            _mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
            _mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);

            /* ... high halves to the odd output rows. */
            _mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
            _mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
        }
        /* Scalar fall-back for the trailing columns (nbyte_row % 16). */
        for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
            out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
            out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
            out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
            out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
            out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
            out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
            out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
        }
    }
    return size * elem_size;
}

/* Shuffle bits within the bytes of eight element blocks.
*/
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
    const char* in_b = (const char*) in;
    uint16_t* out_ui16 = (uint16_t*) out;

    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;

    __m128i xmm;
    int32_t bt;

    /* Odd element sizes cannot be processed 16 bytes at a time; defer to
     * the scalar implementation. */
    if (elem_size % 2) {
        bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
    } else {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                ii += 8 * elem_size) {
            for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
                xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
                /* One bit plane per iteration via movemask + shift. */
                for (kk = 0; kk < 8; kk++) {
                    bt = _mm_movemask_epi8(xmm);
                    xmm = _mm_slli_epi16(xmm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    out_ui16[ind / 2] = bt;
                }
            }
        }
    }
    return size * elem_size;
}

/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    /* Inverse of the shuffle: byte un-transpose, then bit un-shuffle. */
    count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USESSE2

/* Stubs used when compiled without SSE2 support; -11 signals
 * "SSE2 not available" to the dispatching drivers. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size, const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size, const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
    return -11;
}

int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
    return -11;
}

int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size, const size_t elem_size) {
    return -11;
}

#endif // #ifdef USESSE2

/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */

/* ---- Worker code that uses AVX2 ----
 *
 * The following code makes use of the AVX2 instruction set and specialized
 * 32 byte registers. The AVX2 instructions are present on newer x86
 * processors. The first Intel processor microarchitecture supporting AVX2 was
 * Haswell (2013).
 *
 */

#ifdef USEAVX2

/* Transpose bits within bytes. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t ii, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    int32_t* out_i32;

    size_t nbyte = elem_size * size;

    int64_t count;

    __m256i ymm;
    int32_t bt;

    for (ii = 0; ii + 31 < nbyte; ii += 32) {
        ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
        /* One bit plane (32 bits) per iteration via movemask + shift. */
        for (kk = 0; kk < 8; kk++) {
            bt = _mm256_movemask_epi8(ymm);
            ymm = _mm256_slli_epi16(ymm, 1);
            out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_i32 = bt;
        }
    }
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 32);
    return count;
}

/* Transpose bits within elements.
*/
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    /* byte transpose (SSE kernel) -> AVX bit plane pass -> bit-row regroup. */
    count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    size_t hh, ii, jj, kk, mm;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;

    CHECK_MULT_EIGHT(size);

    size_t nrows = 8 * elem_size;
    size_t nbyte_row = size / 8;

    /* The AVX tile needs elem_size to be a multiple of 4; otherwise fall
     * back to the SSE kernel. */
    if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
            elem_size);

    __m256i ymm_0[8];
    __m256i ymm_1[8];
    __m256i ymm_storeage[8][4];

    for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
        for (ii = 0; ii + 3 < elem_size; ii += 4) {
            for (hh = 0; hh < 4; hh ++) {

                /* Load 8 rows of 32 bytes each. */
                for (kk = 0; kk < 8; kk ++){
                    ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
                            (ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
                }

                /* Interleave rounds of growing width: 8 -> 16 -> 32 bit. */
                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                for (kk = 0; kk < 2; kk ++){
                    for (mm = 0; mm < 2; mm ++){
                        ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                        ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
                                ymm_1[kk * 4 + mm * 2],
                                ymm_1[kk * 4 + mm * 2 + 1]);
                    }
                }

                for (kk = 0; kk < 4; kk ++){
                    ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                    ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
                            ymm_0[kk * 2 + 1]);
                }

                /* Stage the partially transposed vectors for the final
                 * 64-bit / lane shuffle below. */
                for (kk = 0; kk < 8; kk ++){
                    ymm_storeage[kk][hh] = ymm_1[kk];
                }
            }

            for (mm = 0; mm < 8; mm ++) {

                for (kk = 0; kk < 4; kk ++){
                    ymm_0[kk] = ymm_storeage[mm][kk];
                }

                ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
                ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
                ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);

                /* Recombine 128-bit lanes (0x20 selects low/low, 0x31
                 * high/high). */
                ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
                ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
                ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
                ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);

                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
                _mm256_storeu_si256((__m256i *) &out_b[
                        (jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
            }
        }
    }
    /* Scalar fall-back for the trailing columns (nbyte_row % 32). */
    for (ii = 0; ii < nrows; ii ++ ) {
        for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
            out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
        }
    }
    return size * elem_size;
}

/* Shuffle bits within the bytes of eight element blocks. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    CHECK_MULT_EIGHT(size);

    // With a bit of care, this could be written such that it is
    // in_buf = out_buf safe.
const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    size_t ii, jj, kk;
    size_t nbyte = elem_size * size;
    __m256i ymm;
    int32_t bt;

    /* The 32-byte tile needs elem_size to be a multiple of 4; otherwise
     * fall back to the SSE kernel. */
    if (elem_size % 4) {
        return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
    } else {
        for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
            for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
                    ii += 8 * elem_size) {
                ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
                /* One bit plane (32 bits) per iteration. */
                for (kk = 0; kk < 8; kk++) {
                    bt = _mm256_movemask_epi8(ymm);
                    ymm = _mm256_slli_epi16(ymm, 1);
                    size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
                    * (int32_t *) &out_b[ind] = bt;
                }
            }
        }
    }
    return size * elem_size;
}

/* Untranspose bits within elements. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {

    int64_t count;

    CHECK_MULT_EIGHT(size);

    void* tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    /* Inverse of the shuffle: byte un-transpose, then bit un-shuffle. */
    count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);

    free(tmp_buf);

    return count;
}

#else // #ifdef USEAVX2

/* Stubs used when compiled without AVX2 support; -12 signals
 * "AVX2 not available" to the dispatching drivers. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
         const size_t elem_size) {
    return -12;
}

#endif // #ifdef USEAVX2

/* ---- Drivers selecting best instruction set at compile time. ---- */

int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
/* Pick the widest instruction set enabled at compile time. */
#ifdef USEAVX2
    count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
    count = bshuf_trans_bit_elem_NEON(in, out, size, elem_size);
#else
    count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}

int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
/* Pick the widest instruction set enabled at compile time. */
#ifdef USEAVX2
    count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
    count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
    count = bshuf_untrans_bit_elem_NEON(in, out, size, elem_size);
#else
    count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
    return count;
}

/* ---- Wrappers for implementing blocking ---- */

/* Wrap a function for processing a single block to process an entire buffer in
 * parallel.
*/ int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \ const size_t size, const size_t elem_size, size_t block_size) { omp_size_t ii = 0; int64_t err = 0; int64_t count, cum_count=0; size_t last_block_size; size_t leftover_bytes; size_t this_iter; char *last_in; char *last_out; ioc_chain C; ioc_init(&C, in, out); if (block_size == 0) { block_size = bshuf_default_block_size(elem_size); } if (block_size % BSHUF_BLOCKED_MULT) return -81; #if defined(_OPENMP) #pragma omp parallel for schedule(dynamic, 1) \ private(count) reduction(+ : cum_count) #endif for (ii = 0; ii < (omp_size_t)( size / block_size ); ii ++) { count = fun(&C, block_size, elem_size); if (count < 0) err = count; cum_count += count; } last_block_size = size % block_size; last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT; if (last_block_size) { count = fun(&C, last_block_size, elem_size); if (count < 0) err = count; cum_count += count; } if (err < 0) return err; leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size; //this_iter; last_in = (char *) ioc_get_in(&C, &this_iter); ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes)); last_out = (char *) ioc_get_out(&C, &this_iter); ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes)); memcpy(last_out, last_in, leftover_bytes); ioc_destroy(&C); return cum_count + leftover_bytes; } /* Bitshuffle a single block. */ int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \ const size_t size, const size_t elem_size) { size_t this_iter; const void *in; void *out; int64_t count; in = ioc_get_in(C_ptr, &this_iter); ioc_set_next_in(C_ptr, &this_iter, (void*) ((char*) in + size * elem_size)); out = ioc_get_out(C_ptr, &this_iter); ioc_set_next_out(C_ptr, &this_iter, (void *) ((char *) out + size * elem_size)); count = bshuf_trans_bit_elem(in, out, size, elem_size); return count; } /* Bitunshuffle a single block. 
*/
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \
        const size_t size, const size_t elem_size) {

    size_t this_iter;
    const void *in;
    void *out;
    int64_t count;

    /* Claim this block's input and output slices and advance the chain. */
    in = ioc_get_in(C_ptr, &this_iter);
    ioc_set_next_in(C_ptr, &this_iter,
            (void*) ((char*) in + size * elem_size));
    out = ioc_get_out(C_ptr, &this_iter);
    ioc_set_next_out(C_ptr, &this_iter,
            (void *) ((char *) out + size * elem_size));

    count = bshuf_untrans_bit_elem(in, out, size, elem_size);
    return count;
}

/* Write a 64 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
    int ii;
    uint8_t* b = (uint8_t*) buf;
    uint64_t pow28 = 1 << 8;  /* 256: one byte per division step */
    for (ii = 7; ii >= 0; ii--) {
        b[ii] = num % pow28;
        num = num / pow28;
    }
}

/* Read a 64 bit unsigned integer from a buffer big endian order. */
uint64_t bshuf_read_uint64_BE(void* buf) {
    int ii;
    uint8_t* b = (uint8_t*) buf;
    uint64_t num = 0, pow28 = 1 << 8, cp = 1;
    for (ii = 7; ii >= 0; ii--) {
        num += b[ii] * cp;
        cp *= pow28;
    }
    return num;
}

/* Write a 32 bit unsigned integer to a buffer in big endian order. */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
    int ii;
    uint8_t* b = (uint8_t*) buf;
    uint32_t pow28 = 1 << 8;
    for (ii = 3; ii >= 0; ii--) {
        b[ii] = num % pow28;
        num = num / pow28;
    }
}

/* Read a 32 bit unsigned integer from a buffer big endian order. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
    int ii;
    /* NOTE(review): cast silently drops the `const` qualifier; the buffer
     * is only read here, but a `const uint8_t*` would be cleaner. */
    uint8_t* b = (uint8_t*) buf;
    uint32_t num = 0, pow28 = 1 << 8, cp = 1;
    for (ii = 3; ii >= 0; ii--) {
        num += b[ii] * cp;
        cp *= pow28;
    }
    return num;
}

/* ---- Public functions ----
 *
 * See header file for description and usage.
 *
 */

size_t bshuf_default_block_size(const size_t elem_size) {
    // This function needs to be absolutely stable between versions.
    // Otherwise encoded data will not be decodable.

    size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
    // Ensure it is a required multiple.
    block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
    return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK);
}

int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {

    return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size,
            elem_size, block_size);
}

int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {

    return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size,
            elem_size, block_size);
}

#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE

#undef USESSE2
#undef USEAVX2
HVFinder.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define PI 3.141592
#define SUB 1e+10  /* NOTE(review): unused in this file */

/*Structure for the particles in the original catalog*/
typedef struct Particle {
   float pos[3];  /*Array with the position of the particle*/
} PARTICLE;

/*List with the particles around the center of the Spherical void*/
typedef struct Radius {
   int label;   /*Label of the particle*/
   float dist;  /*Distance from the center*/
} RAD;

/*Structure used to allocate the particles in the grid*/
typedef struct Grid_Particles {
   int label;     /*Label of the particle*/
   float pos[3];  /*Array with the position of the particle*/
} GRID;

/*Structure used to temporarily allocate the centers*/
typedef struct Center {
   float pos[3];  /*Array with the position of the center*/
   float den;     /*Density of the central particle in the halo*/
   int label;     /*Label of the central particle*/
} CENTER;

/*Structure used to allocate the centers*/
typedef struct Center2 {
   float pos[3];  /*Array with the position of the center*/
   int label;     /*Label of the central particle*/
} CENTER2;

/*Structure with the final Halos/Voids*/
typedef struct Halo_Void {
   int label;     /*Label (center label + 1, negated convention for voids in flag[])*/
   float pos[3];  /*Center position*/
   float R;       /*Estimated radius (midpoint of last two member distances)*/
   float errR;    /*Half the gap between the last two member distances*/
   int np;        /*Number of member particles*/
   float den;     /*Final density contrast den/n_m*/
} HV;

/*Compute the grid cell index of a particle position.
  NOTE(review): "(int) x[0]/Lb" casts the coordinate to int BEFORE dividing
  (i.e. ((int)x[0])/Lb), which can bin a particle differently from
  floor(x/Lb) — confirm this truncation order is intended.*/
void indices(float x[], int xt[], float Lb, int nd){
   xt[0] = (int) x[0]/Lb;
   xt[1] = (int) x[1]/Lb;
   xt[2] = (int) x[2]/Lb;

   /*Clamp positions exactly on the upper box edge into the last cell*/
   if(xt[0]==nd)   xt[0] -=1;
   if(xt[1]==nd)   xt[1] -=1;
   if(xt[2]==nd)   xt[2] -=1;
}

/*Partition function (Hoare scheme, first element as pivot) used in the
  radius quicksort. Sorts by .dist and carries .label along.*/
int partition( RAD a[], int l, int r) {
   int i, j;
   RAD pivot, t;
   pivot.label = a[l].label;
   pivot.dist = a[l].dist;
   i = l; j = r+1;

   while( 1){
      do ++i; while( a[i].dist <= pivot.dist && i <= r );
      do --j; while( a[j].dist > pivot.dist );
      if( i >= j ) break;
      /*Swap elements i and j (dist and label fields)*/
      t.dist = a[i].dist; a[i].dist = a[j].dist; a[j].dist = t.dist;
      t.label = a[i].label; a[i].label = a[j].label; a[j].label = t.label;
   }
   /*Move the pivot into its final slot j*/
   t.dist = a[l].dist; a[l].dist = a[j].dist; a[j].dist= t.dist;
   t.label = a[l].label; a[l].label = a[j].label; a[j].label = t.label;
   return j;
}

/*The quicksort algorithm to sort the RAD list by distance*/
void quickSort( RAD a[], int l, int r){
   int j;
   if( l < r ){
      // divide and conquer
      j = partition( a, l, r);
      quickSort( a, l, j-1);
      quickSort( a, j+1, r);
   }
}

/*Partition function used in the center quicksort. Sorts by .den and
  carries label and position along.*/
int partition_center(CENTER a[], int l, int r) {
   int i, j, k;
   CENTER pivot, t;
   pivot.label = a[l].label;
   pivot.den = a[l].den;
   i = l; j = r+1;

   while( 1){
      do ++i; while( a[i].den <= pivot.den && i <= r );
      do --j; while( a[j].den > pivot.den );
      if( i >= j ) break;
      t.den = a[i].den; a[i].den = a[j].den; a[j].den = t.den;
      t.label = a[i].label; a[i].label = a[j].label; a[j].label = t.label;
      for(k=0;k<3;k++){
         t.pos[k] = a[i].pos[k]; a[i].pos[k] = a[j].pos[k]; a[j].pos[k] = t.pos[k];
      }
   }
   t.den = a[l].den; a[l].den = a[j].den; a[j].den= t.den;
   t.label = a[l].label; a[l].label = a[j].label; a[j].label = t.label;
   for(k=0;k<3;k++){
      t.pos[k] = a[l].pos[k]; a[l].pos[k] = a[j].pos[k]; a[j].pos[k] = t.pos[k];
   }
   return j;
}

/*The quicksort algorithm to sort the center list by density*/
void quickSort_center(CENTER a[], int l, int r){
   int j;
   if( l < r ){
      // divide and conquer
      j = partition_center( a, l, r);
      quickSort_center( a, l, j-1);
      quickSort_center( a, j+1, r);
   }
}

/*Halo/Void finder driver: reads a binary particle catalog plus a set of
  .crit files with density peaks/troughs, grows spherical regions around
  the sorted centers in parallel (one stripe of centers per OpenMP thread),
  and writes <prefix>_halos.dat / <prefix>_voids.dat plus a per-particle
  membership file <prefix>_props.dat.*/
int main(int argc,char *argv[]) {
   FILE *part, *outh, *outv, *crit, *outp;
   char partfile[100], outhfile[100], outvfile[100], critfile[100], propfile[100];
   /* NOTE(review): h_vel and denTOT (below) are declared but never used */
   int i, j, k, l, a, b, c, cont, h_vel, N_cores, Ns, np, np2, nbh, nbv, nd, nh, nv, DOhv, n, div, rest, conth, contv;
   int ***cont1, ***cont2, xt[3], *flag, percent;
   float L, Lch, Lcv, Lb, Lm, n_m, denTOT, Dh, Dv, xc[3], den;
   PARTICLE *p;
   GRID ****m;
   HV *hv;
   CENTER *centerh, *centerv;
   CENTER2 **cen;

   /*Check all inputs*/
   if (argc != 10){
      printf("Wrong number of arguments.\n");
      printf("arg1: The preffix of the input file\n");
      printf("arg2: The preffix of the output file\n");
      printf("arg3: Find the Halos (0), Voids (1) or both (2)?\n");
      printf("arg4: Halo's caracteristic lenght.\n");
      printf("arg5: Void's caracteristic lenght.\n");
      printf("arg6: Halo's contrast density.\n");
      printf("arg7: Void's contrast density.\n");
      printf("arg8: Number of critical files.\n");
      printf("arg9: Give the number of cores for the parallel computation.\n\n");
      exit(0);
   }

   /*Get the name of all files*/
   sprintf(partfile,"%s", argv[1]);
   sprintf(outhfile,"%s_halos.dat", argv[2]);
   sprintf(outvfile,"%s_voids.dat", argv[2]);
   sprintf(propfile,"%s_props.dat", argv[2]);

   /*Read the parameters given by the user*/
   DOhv = atoi(argv[3]);     /*Find halos, voids or both parameter*/
   Lch = atof(argv[4]);      /*Characteristic length of halos*/
   Lcv = atof(argv[5]);      /*Characteristic length of voids*/
   Dh = atof(argv[6]);       /*Halo's contrast density*/
   Dv = atof(argv[7]);       /*Void's contrast density*/
   Ns = atoi(argv[8]);       /*Number of critical files per axis to be read*/
   N_cores = atoi(argv[9]);  /*Number of cores for the parallelization*/

   /*Check if the kind of finder is ok*/
   if(DOhv != 0 && DOhv !=1 && DOhv != 2){
      printf("You need to say the kind of structure that you need to find, Halos (0), Voids (1) or both (2)?\n");
      exit(0);
   }

   /*Open the particles file*/
   part = fopen(partfile,"rb");
   if (part == NULL) {
      printf("Unable to open %s\n",partfile);
      exit(0);
   }

   /*Read the total number of particles and the size of the box*/
   fread(&np, sizeof(int), 1, part);
   fread(&L, sizeof(float), 1, part);

   /*Some parameters of simulation and grid*/
   nd = (int)floor(pow(np,1.0/3.0)/2+0.5);  /*Number of divisions of the linking grid*/
   Lb = L/nd;                               /*Size of each sub-box*/
   nbh = (int)floor(Lch/Lb + 0.9);          /*Number of grid cells that contain Lch*/
   nbv = (int)floor(Lcv/Lb + 0.9);          /*Number of grid cells that contain Lcv*/
   n_m = np/(L*L*L);                        /*The mean density in (Mpc/h)^-3*/
   Lm = L/pow(np,1.0/3.0);                  /*The mean separation between particles (NOTE(review): unused)*/
   printf("The mean density of the catalog is %f\n", n_m);
   printf("np = %d and nd = %d\n", np, nd);

   /*Allocating the per-cell particle counters*/
   cont1 = (int ***)malloc(nd*sizeof(int **));
   cont2 = (int ***)malloc(nd*sizeof(int **));
   for(i=0;i<nd;i++){
      cont1[i] = (int **)malloc(nd*sizeof(int *));
      cont2[i] = (int **)malloc(nd*sizeof(int *));
   }
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++){
         cont1[i][j] = (int *)malloc(nd*sizeof(int));
         cont2[i][j] = (int *)malloc(nd*sizeof(int));
      }
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         for(k=0;k<nd;k++){
            cont1[i][j][k] = 0;
            cont2[i][j][k] = 0;
         }

   /*First pass: read the position of all particles and count how many fall
     in each grid cell (cont1), so cells can be sized exactly*/
   p = (PARTICLE *)malloc(np*sizeof(PARTICLE));
   for (i=0;i<np;i++){
      for(j=0;j<3;j++)
         fread(&p[i].pos[j], sizeof(float), 1, part);
      indices(p[i].pos, xt, Lb, nd);
      cont1[xt[0]][xt[1]][xt[2]] += 1;
   }
   fclose(part);

   /*Allocating the grid structure*/
   m = (GRID ****)malloc(nd*sizeof(GRID ***));
   for(i=0;i<nd;i++)
      m[i] = (GRID ***)malloc(nd*sizeof(GRID **));
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         m[i][j] = (GRID **)malloc(nd*sizeof(GRID *));
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         for(k=0;k<nd;k++)
            m[i][j][k] = (GRID *)malloc(cont1[i][j][k]*sizeof(GRID));

   /*Array with the information if the particle is in a halo or a void
     (0 = unassigned, +label = halo member, -label = void member)*/
   flag = (int *)malloc(np*sizeof(int));

   /*Second pass: scatter particles into the grid structure m (used to find
     the nearest particles faster); cont2 is the write cursor per cell*/
   for(i=0;i<np;i++){
      flag[i] = 0;
      indices(p[i].pos, xt, Lb, nd);
      m[xt[0]][xt[1]][xt[2]][cont2[xt[0]][xt[1]][xt[2]]].label = i;
      for(j=0;j<3;j++)
         m[xt[0]][xt[1]][xt[2]][cont2[xt[0]][xt[1]][xt[2]]].pos[j] = p[i].pos[j];
      cont2[xt[0]][xt[1]][xt[2]] += 1;
   }

   /*Free p (positions now live in the grid)*/
   free(p);

   /*Free the count2*/
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         free(cont2[i][j]);
   for(i=0;i<nd;i++)
      free(cont2[i]);
   free(cont2);

   /*Read the total number of halos and voids (headers only) and allocate them*/
   nh = 0;
   nv = 0;
   printf("Ns = %d\n", Ns);
   for(i=0;i<Ns;i++)
      for(j=0;j<Ns;j++)
         for(k=0;k<Ns;k++){
            sprintf(critfile, "%s_%d_%d_%d.crit", argv[1], i, j, k);
            crit = fopen(critfile, "rb");
            if (crit == NULL){
               printf("Unable to open critical file %s.\n\n", critfile);
               exit(0);
            }
            fread(&a, sizeof(int), 1, crit);
            fread(&b, sizeof(int), 1, crit);
            fclose(crit);
            nh += a;
            nv += b;
         }
   printf("There are %d halos and %d voids\n", nh, nv);

   /*Alloc the arrays with the centers*/
   if(DOhv == 0 || DOhv == 2)
      centerh = (CENTER *)malloc(nh*sizeof(CENTER));  /*Array with the halo's centers*/
   if(DOhv == 1 || DOhv == 2)
      centerv = (CENTER *)malloc(nv*sizeof(CENTER));  /*Array with the void's centers*/

   /*Read the information about peaks and troughs*/
   conth = 0;
   contv = 0;
   for(i=0;i<Ns;i++)
      for(j=0;j<Ns;j++)
         for(k=0;k<Ns;k++){
            sprintf(critfile, "%s_%d_%d_%d.crit", argv[1], i, j, k);
            crit = fopen(critfile, "rb");
            if (crit == NULL){
               printf("Unable to open critical file %s.\n\n", critfile);
               exit(0);
            }
            fread(&a, sizeof(int), 1, crit);
            fread(&b, sizeof(int), 1, crit);
            for(c=0;c<(a + b);c++){
               for(l=0;l<3;l++)
                  fread(&xc[l], sizeof(float), 1, crit);
               fread(&den, sizeof(float), 1, crit);
               /*Keep as a halo seed if dense enough*/
               if(den >= Dh && (DOhv == 0 || DOhv == 2)){
                  centerh[conth].label = conth;
                  for(l=0;l<3;l++)
                     centerh[conth].pos[l] = xc[l];
                  centerh[conth].den = den;
                  conth ++;
               }
               /*Keep as a void seed if empty enough*/
               if(den <= Dv && (DOhv == 1 || DOhv == 2)){
                  centerv[contv].label = contv;
                  for(l=0;l<3;l++)
                     centerv[contv].pos[l] = xc[l];
                  centerv[contv].den = den;
                  contv ++;
               }
            }
         }
   /*NOTE(review): this fclose sits OUTSIDE the i/j/k loop, so only the last
     .crit file is closed here — every earlier iteration of this second loop
     leaks a FILE handle (the first loop closes inside the loop). The fclose
     should be inside the loop body.*/
   fclose(crit);

   /*Check the number of halos*/
   if((DOhv == 0 || DOhv == 2) && nh != conth){
      printf("There are some problem with the halo number! %d != %d\n", nh, conth);
      exit(0);
   }
   /*Check the number of voids*/
   if((DOhv == 1 || DOhv == 2) && nv != contv){
      printf("There are some problem with the void number! %d != %d\n", nv, contv);
      exit(0);
   }

   /*************************/
   /*Start to grow the halos*/
   /*************************/
   if(DOhv == 0 || DOhv == 2){
      /*Sort the peaks using their density (ascending; densest processed first below)*/
      quickSort_center(centerh, 0, nh-1);

      /*Number of halos for each core*/
      div = nh/N_cores;
      rest = nh%N_cores;

      /*Alloc the array with the final centers*/
      cen = (CENTER2 **)malloc(N_cores*sizeof(CENTER2 *));
      for(i=0;i<N_cores;i++)
         cen[i] = (CENTER2 *)malloc((div+1)*sizeof(CENTER2 ));

      /*Stripe the centers round-robin over the cores, densest first
        (index (nh-1) - (j*N_cores + i) walks the sorted array backwards)*/
      for(i=0;i<N_cores;i++)
         for(j=0;j<div;j++){
            for(k=0;k<3;k++)
               cen[i][j].pos[k] = centerh[(nh - 1) - (j*N_cores + i)].pos[k];
            cen[i][j].label = centerh[(nh - 1) - (j*N_cores + i)].label;
         }
      for(i=0;i<rest;i++){
         for(k=0;k<3;k++)
            cen[i][div].pos[k] = centerh[(nh - 1) - (div*N_cores + i)].pos[k];
         cen[i][div].label = centerh[(nh - 1) - (div*N_cores + i)].label;
      }
      free(centerh);

      /*Array with the final halos*/
      hv = (HV *)malloc(nh*sizeof(HV));
      np2 = 0;
      printf("We have %d peaks to look!\n", nh);

      omp_set_num_threads(N_cores);
#pragma omp parallel for private(i, j, k, l, cont, percent)
      for(i=0;i<N_cores;i++){
         percent = 0;
         for(l=0;l<div+1;l++){
            /*Skip the extra slot on cores that did not receive a remainder center*/
            if(l == div && i>=rest)
               continue;

            /*Declare the per-iteration variables*/
            float *x_c, *dx, den, dist;
            int x, y, z, a, b, c, *xt, n;
            RAD *r;

            r = (RAD *)malloc(np*sizeof(RAD));
            x_c = (float *)malloc(3*sizeof(float));
            dx = (float *)malloc(3*sizeof(float));
            xt = (int *)malloc(3*sizeof(int));

            /*The center of the halo*/
            for(k=0;k<3;k++)
               x_c[k] = cen[i][l].pos[k];
            cont = 0;
            indices(x_c, xt, Lb, nd);

            /*Collect all particles within Lch of the center, scanning the
              surrounding nbh cells with periodic wrap-around*/
            for(a=-nbh;a<=nbh;a++)
               for(b=-nbh;b<=nbh;b++)
                  for(c=-nbh;c<=nbh;c++){
                     x = xt[0] + a;
                     y = xt[1] + b;
                     z = xt[2] + c;
                     if(x<0) x += nd; if(x>=nd) x -= nd;
                     if(y<0) y += nd; if(y>=nd) y -= nd;
                     if(z<0) z += nd; if(z>=nd) z -= nd;
                     for(j=0;j<cont1[x][y][z];j++){
                        for (k=0;k<3;k++){
                           /*Minimum-image distance component*/
                           dx[k] = x_c[k] - m[x][y][z][j].pos[k];
                           if(dx[k] < -L/2.0) dx[k] = dx[k] + L;
                           if(dx[k] > L/2.0) dx[k] = dx[k] - L;
                        }
                        dist = 0.0;
                        for (k=0;k<3;k++)
                           dist += dx[k]*dx[k];
                        dist = sqrt(dist);
                        if(dist < Lch){
                           r[cont].label = m[x][y][z][j].label;
                           r[cont].dist = dist;
                           cont ++;
                        }
                     }
                  }

            /*Order by distance to the center*/
            quickSort( r, 0, cont-1);

            den = 0.0;  /*Number density of the halo*/
            n = 1;      /*Number of particles in the halo*/
            a = 1;      /*Abort flag: 0 if the central particle is already taken*/
            if(flag[r[0].label] != 0)
               a = 0;
            if(a == 1)
               flag[r[0].label] = cen[i][l].label + 1;

            /*Grow outward while the enclosed density stays above Dh.
              NOTE(review): unlike the void loop below, this while has no
              "n < cont" bound — if the density never drops below Dh within
              Lch, r[n] is read past the collected list (and past the sorted
              region). The void version guards this; this one should too.*/
            while(a == 1){
               if(flag[r[n].label] != 0)
                  break;
               den = 3.0*(n+1)/(4.0*PI*r[n].dist*r[n].dist*r[n].dist);
               if(den/n_m < Dh)
                  break;
               flag[r[n].label] = cen[i][l].label + 1;
               n++;
            }

            if(a == 1){
#pragma omp critical
               {
                  hv[np2].label = cen[i][l].label + 1;
                  hv[np2].pos[0] = x_c[0];
                  hv[np2].pos[1] = x_c[1];
                  hv[np2].pos[2] = x_c[2];
                  hv[np2].R = (r[n].dist + r[n-1].dist)/2;
                  hv[np2].errR = (r[n].dist - r[n-1].dist)/2;
                  hv[np2].np = n;
                  hv[np2].den = den/n_m;
                  np2 ++;
               }
            }

            free(x_c);
            free(dx);
            free(xt);
            free(r);

            /*Progress report per thread*/
            if(percent < l*100/div){
               percent = l*100/div;
               printf("%d%% of the halos found in the porcessor %d\n", percent, i);
            }
         }
      }

      /*Open the output halo's file*/
      outh = fopen(outhfile,"w");
      if (outh == NULL) {
         printf("Unable to open halo's output file %s.\n\n", outhfile);
         exit(0);
      }

      /*Print the main halo's information*/
      fprintf(outh,"%d\n", np2);
      for(i=0;i<np2;i++)
         fprintf(outh,"%d %f %f %f %f %f %d %f\n", hv[i].label, hv[i].pos[0], hv[i].pos[1], hv[i].pos[2], hv[i].R, hv[i].errR, hv[i].np, hv[i].den);

      /*Free the center's positions*/
      for(i=0;i<N_cores;i++)
         free(cen[i]);
      free(cen);
      fclose(outh);
      free(hv);
   }

   /*****************************/
   /*Start the grow of the voids*/
   /*****************************/
   if(DOhv == 1 || DOhv == 2){
      /*Sort the troughs using their density (emptiest processed first below)*/
      quickSort_center(centerv, 0, nv-1);

      /*Number of voids for each core*/
      div = nv/N_cores;
      rest = nv%N_cores;

      /*Alloc the array with the final centers*/
      cen = (CENTER2 **)malloc(N_cores*sizeof(CENTER2 *));
      for(i=0;i<N_cores;i++)
         cen[i] = (CENTER2 *)malloc((div+1)*sizeof(CENTER2 ));

      /*Stripe the centers round-robin over the cores, lowest density first*/
      for(i=0;i<N_cores;i++)
         for(j=0;j<div;j++){
            for(k=0;k<3;k++)
               cen[i][j].pos[k] = centerv[j*N_cores + i].pos[k];
            cen[i][j].label = centerv[j*N_cores + i].label;
         }
      for(i=0;i<rest;i++){
         for(k=0;k<3;k++)
            cen[i][div].pos[k] = centerv[div*N_cores + i].pos[k];
         cen[i][div].label = centerv[div*N_cores + i].label;
      }
      free(centerv);

      /*Array with the final voids*/
      hv = (HV *)malloc(nv*sizeof(HV));
      np2 = 0;
      printf("\nWe have %d troughs to look!\n", nv);

      omp_set_num_threads(N_cores);
#pragma omp parallel for private(i, j, k, l, cont, percent)
      for(i=0;i<N_cores;i++){
         percent = 0;
         for(l=0;l<div+1;l++){
            /*Skip the extra slot on cores that did not receive a remainder center*/
            if(l == div && i>=rest)
               continue;

            /*Declare the per-iteration variables*/
            float *x_c, *dx, den, dist;
            int x, y, z, a, b, c, *xt, n;
            RAD *r;

            r = (RAD *)malloc(np*sizeof(RAD));
            x_c = (float *)malloc(3*sizeof(float));
            dx = (float *)malloc(3*sizeof(float));
            xt = (int *)malloc(3*sizeof(int));

            /*The center of the void*/
            for(k=0;k<3;k++)
               x_c[k] = cen[i][l].pos[k];
            cont = 0;
            indices(x_c, xt, Lb, nd);

            /*Collect all particles within Lcv of the center, scanning the
              surrounding nbv cells with periodic wrap-around*/
            for(a=-nbv;a<=nbv;a++)
               for(b=-nbv;b<=nbv;b++)
                  for(c=-nbv;c<=nbv;c++){
                     x = xt[0] + a;
                     y = xt[1] + b;
                     z = xt[2] + c;
                     if(x<0) x += nd; if(x>=nd) x -= nd;
                     if(y<0) y += nd; if(y>=nd) y -= nd;
                     if(z<0) z += nd; if(z>=nd) z -= nd;
                     for(j=0;j<cont1[x][y][z];j++){
                        for (k=0;k<3;k++){
                           /*Minimum-image distance component*/
                           dx[k] = x_c[k] - m[x][y][z][j].pos[k];
                           if(dx[k] < -L/2.0) dx[k] = dx[k] + L;
                           if(dx[k] > L/2.0) dx[k] = dx[k] - L;
                        }
                        dist = 0.0;
                        for (k=0;k<3;k++)
                           dist += dx[k]*dx[k];
                        dist = sqrt(dist);
                        if(dist < Lcv){
                           r[cont].label = m[x][y][z][j].label;
                           r[cont].dist = dist;
                           cont ++;
                        }
                     }
                  }

            /*Order by distance to the center*/
            quickSort( r, 0, cont-1);

            den = 0.0;  /*Number density of the void*/
            n = 3;      /*Number of particles in the void (starts past the 3 seeds)*/
            a = 1;      /*Abort flag: 0 if any of the three central particles is taken*/
            for(k=0;k<3;k++)
               if(flag[r[k].label] != 0)
                  a = 0;
            for(k=0;k<3 && a == 1;k++)
               flag[r[k].label] = -cen[i][l].label - 1;  /*Negative labels mark void membership*/

            /*Grow outward while the enclosed density stays below Dv*/
            while(a == 1 && n < cont){
               if(flag[r[n].label] != 0)
                  break;
               den = 3.0*(n+1)/(4.0*PI*r[n].dist*r[n].dist*r[n].dist);
               if(den/n_m > Dv)
                  break;
               flag[r[n].label] = -cen[i][l].label - 1;
               n++;
            }
            /*Diagnostic: the void filled the whole search sphere (Lcv too small?)*/
            if(n == cont)
               printf("n = %d and cont = %d. r[cont].dist = %f\n", n, cont, r[cont-1].dist);

            if(a == 1){
#pragma omp critical
               {
                  hv[np2].label = cen[i][l].label + 1;
                  hv[np2].pos[0] = x_c[0];
                  hv[np2].pos[1] = x_c[1];
                  hv[np2].pos[2] = x_c[2];
                  hv[np2].R = (r[n].dist + r[n-1].dist)/2;
                  hv[np2].errR = (r[n].dist - r[n-1].dist)/2;
                  hv[np2].np = n;
                  hv[np2].den = den/n_m;
                  np2 ++;
               }
            }

            free(x_c);
            free(dx);
            free(xt);
            free(r);

            /*Progress report per thread*/
            if(percent < l*100/div){
               percent = l*100/div;
               printf("%d%% of the voids found in the porcessor %d\n", percent, i);
            }
         }
      }

      /*Open the output void's file*/
      outv = fopen(outvfile,"w");
      if (outv == NULL) {
         printf("Unable to open the output file %s.\n\n", outvfile);
         exit(0);
      }

      /*Print the main void's information*/
      fprintf(outv,"%d\n", np2);
      for(i=0;i<np2;i++)
         fprintf(outv,"%d %f %f %f %f %f %d %f\n", hv[i].label, hv[i].pos[0], hv[i].pos[1], hv[i].pos[2], hv[i].R, hv[i].errR, hv[i].np, hv[i].den);

      /*Free the center's positions*/
      for(i=0;i<N_cores;i++)
         free(cen[i]);
      free(cen);
      fclose(outv);
      free(hv);
   }

   /*Free the count1*/
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         free(cont1[i][j]);
   for(i=0;i<nd;i++)
      free(cont1[i]);
   free(cont1);

   /*Free the m structure*/
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         for(k=0;k<nd;k++)
            free(m[i][j][k]);
   for(i=0;i<nd;i++)
      for(j=0;j<nd;j++)
         free(m[i][j]);
   for(i=0;i<nd;i++)
      free(m[i]);
   free(m);

   /*Open the particles properties file*/
   outp = fopen(propfile, "w");
   if (outp == NULL) {
      printf("Unable to open %s\n", propfile);
      exit(0);
   }

   /*Print the membership flag of every particle*/
   fprintf(outp, "%d\n", np);
   for(i=0;i<np;i++)
      fprintf(outp, "%d\n", flag[i]);
   fclose(outp);

   return 0;
}
rakp_fmt_plug.c
/*
 * This software is Copyright (c) 2013 magnum, and it is hereby released to the
 * general public under the following terms: Redistribution and use in source
 * and binary forms, with or without modification, are permitted.
 */

/* John the Ripper format plugin for IPMI 2.0 RAKP (HMAC-SHA1 over the
 * session-integrity data). The three-stage plugin stanza below is JtR's
 * standard registration idiom. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rakp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rakp);
#else

#include <string.h>

#include "arch.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT
#endif
#endif

#include "misc.h"
#include "common.h"
#include "formats.h"
#include "sha.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"

#define FORMAT_LABEL "RAKP"
#define FORMAT_NAME "IPMI 2.0 RAKP (RMCP+)"

#ifdef SIMD_COEF_32
#define SHA1_N (SIMD_PARA_SHA1 * SIMD_COEF_32)
#endif

#define ALGORITHM_NAME "HMAC-SHA1 " SHA1_ALGORITHM_NAME

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0

#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 64                 /* SHA-1 block size; HMAC pad length */
#define BINARY_SIZE 20              /* SHA-1 digest length */
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_LENGTH (2 * PAD_SIZE)  /* salt occupies up to two SHA-1 blocks */
#define SALT_ALIGN MEM_ALIGN_NONE
/* Salt must leave room in the second block for the 0x80 pad + 8 length bytes */
#define SALT_MIN_SIZE (PAD_SIZE - 8)
#define SALT_MAX_SIZE (2 * PAD_SIZE - 8 - 1)

#define FORMAT_TAG "$rakp$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)

#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SHA1_N
#define MAX_KEYS_PER_CRYPT SHA1_N
/* Byte offset of logical byte i of lane `index` inside the interleaved,
 * big-endian SIMD input buffer. */
#define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i) & (0xffffffff - 3)) * SIMD_COEF_32 + (3 - ((i) & 3)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * 4 * SIMD_COEF_32)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

static struct fmt_tests tests[] = {
	{"$rakp$a4a3a2a03f0b000094272eb1ba576450b0d98ad10727a9fb0ab83616e099e8bf5f7366c9c03d36a3000000000000000000000000000000001404726f6f74$0ea27d6d5effaa996e5edc855b944e179a2f2434", "calvin"},
	{"$rakp$c358d2a72f0c00001135f9b254c274629208b22f1166d94d2eba47f21093e9734355a33593da16f2000000000000000000000000000000001404726f6f74$41fce60acf2885f87fcafdf658d6f97db12639a9", "calvin"},
	{"$rakp$b7c2d6f13a43dce2e44ad120a9cd8a13d0ca23f0414275c0bbe1070d2d1299b1c04da0f1a0f1e4e2537300263a2200000000000000000000140768617368636174$472bdabe2d5d4bffd6add7b3ba79a291d104a9ef", "hashcat"},
	/* dummy hash for testing long salts */
	{"$rakp$787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878787878$ba4ecc30a0b36a6ba0db862fc95201a81b9252ee", ""},
	{NULL}
};

#ifdef SIMD_COEF_32
#define cur_salt rakp_cur_salt
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;  /* raw HMAC pads and precomputed first-block states */
static unsigned char *opad, *prep_opad;
/* Salt laid out as two pre-interleaved SHA-1 input blocks, one per SIMD lane */
JTR_ALIGN(MEM_ALIGN_SIMD) unsigned char cur_salt[2][SHA_BUF_SIZ * 4 * SHA1_N];
static int bufsize;
#else
static struct {
	int length;
	unsigned char salt[SALT_LENGTH];
} cur_salt;
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char (*opad)[PAD_SIZE];
static SHA_CTX *ipad_ctx;  /* cached SHA-1 state after hashing the ipad block */
static SHA_CTX *opad_ctx;  /* cached SHA-1 state after hashing the opad block */
#endif

static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;

#define SALT_SIZE sizeof(cur_salt)

#ifdef SIMD_COEF_32
/* Reset all lanes to the pristine HMAC pad bytes (0x36 / 0x5C). */
static void clear_keys(void)
{
	memset(ipad, 0x36, bufsize);
	memset(opad, 0x5C, bufsize);
}
#endif

/* Allocate all per-candidate buffers, scaled by the OpenMP thread count.
 * NOTE(review): omp_get_num_threads() outside a parallel region returns 1 —
 * presumably JtR calls init() in a context where this gives the intended
 * scaling; confirm against other formats. */
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
	int i;
#endif
#ifdef _OPENMP
	int omp_t = omp_get_num_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * SHA_BUF_SIZ * 4;
	crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
	prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt * BINARY_SIZE, sizeof(*prep_ipad), MEM_ALIGN_SIMD);
	prep_opad = mem_calloc_align(self->params.max_keys_per_crypt * BINARY_SIZE, sizeof(*prep_opad), MEM_ALIGN_SIMD);
	/* Pre-write the SHA-1 padding byte and bit-length word for the fixed-size
	 * outer hash input (20 digest bytes after a 64-byte pad block). */
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
		crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80;
		((unsigned int*)crypt_key)[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (BINARY_SIZE + 64) << 3;
	}
	clear_keys();
#else
	crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key));
	ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
	opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
	ipad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad_ctx));
	opad_ctx = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad_ctx));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_plain));
}

/* Release everything init() allocated. */
static void done(void)
{
	MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
	MEM_FREE(prep_opad);
	MEM_FREE(prep_ipad);
#else
	MEM_FREE(opad_ctx);
	MEM_FREE(ipad_ctx);
#endif
	MEM_FREE(opad);
	MEM_FREE(ipad);
	MEM_FREE(crypt_key);
}

/* Validate "$rakp$<hex salt>$<40 hex digest>": tag optional, salt length
 * within [SALT_MIN_SIZE, SALT_MAX_SIZE] bytes, all-lowercase hex. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;

	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;

	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;

	if ((q - p - 1) > SALT_MAX_SIZE * 2)
		return 0;

	if ((q - p - 1) < SALT_MIN_SIZE * 2)
		return 0;

	len = strspn(q, HEXCHARS_lc);
	if (len != BINARY_SIZE * 2 || len != strlen(q))
		return 0;

	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;

	return 1;
}

/* Install a salt previously built by get_salt(). */
static void set_salt(void *salt)
{
	memcpy(&cur_salt, salt, SALT_SIZE);
}

/* Store a candidate password and XOR it into the ipad/opad buffers for lane
 * `index`. Keys longer than one SHA-1 block are first reduced to their
 * SHA-1 digest, per the HMAC definition. */
static void set_key(char *key, int index)
{
	int len;
#ifdef SIMD_COEF_32
	ARCH_WORD_32 *ipadp = (ARCH_WORD_32*)&ipad[GETPOS(3, index)];
	ARCH_WORD_32 *opadp = (ARCH_WORD_32*)&opad[GETPOS(3, index)];
	const ARCH_WORD_32 *keyp = (ARCH_WORD_32*)key;
	unsigned int temp;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	if (len > PAD_SIZE) {
		unsigned char k0[BINARY_SIZE];
		SHA_CTX ctx;
		int i;

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, key, len);
		SHA1_Final(k0, &ctx);

		keyp = (unsigned int*)k0;
		for(i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
		{
			temp = JOHNSWAP(*keyp++);
			*ipadp ^= temp;
			*opadp ^= temp;
		}
	}
	else
	/* XOR the key in 4-byte big-endian chunks until the NUL terminator;
	 * the byte tests find which byte of the word holds the terminator. */
	while(((temp = JOHNSWAP(*keyp++)) & 0xff000000)) {
		if (!(temp & 0x00ff0000) || !(temp & 0x0000ff00))
		{
			((unsigned short*)ipadp)[1] ^= (unsigned short)(temp >> 16);
			((unsigned short*)opadp)[1] ^= (unsigned short)(temp >> 16);
			break;
		}
		*ipadp ^= temp;
		*opadp ^= temp;
		if (!(temp & 0x000000ff))
			break;
		ipadp += SIMD_COEF_32;
		opadp += SIMD_COEF_32;
	}
#else
	int i;

	len = strlen(key);
	memcpy(saved_plain[index], key, len);
	saved_plain[index][len] = 0;

	memset(ipad[index], 0x36, PAD_SIZE);
	memset(opad[index], 0x5C, PAD_SIZE);

	if (len > PAD_SIZE) {
		SHA_CTX ctx;
		unsigned char k0[BINARY_SIZE];

		SHA1_Init(&ctx);
		SHA1_Update(&ctx, key, len);
		SHA1_Final(k0, &ctx);

		len = BINARY_SIZE;

		for(i = 0; i < len; i++)
		{
			ipad[index][i] ^= k0[i];
			opad[index][i] ^= k0[i];
		}
	}
	else
	for(i = 0; i < len; i++)
	{
		ipad[index][i] ^= key[i];
		opad[index][i] ^= key[i];
	}
#endif
	new_keys = 1;
}

/* Return the stored plaintext candidate for lane `index`. */
static char *get_key(int index)
{
	return saved_plain[index];
}

/* Quick reject: does ANY computed digest match the first word of `binary`? */
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
	unsigned int x, y = 0;

	for(; y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++)
		for(x = 0; x < SIMD_COEF_32; x++)
		{
			// NOTE crypt_key is in input format (4*SHA_BUF_SIZ*SIMD_COEF_32)
			if(((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32 * SHA_BUF_SIZ])
				return 1;
		}
	return 0;
#else
	int index = 0;

#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
	for (index = 0; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
			return 1;
	return 0;
#endif
}

/* Full 20-byte comparison against one lane. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	int i;
	for(i = 0; i < (BINARY_SIZE/4); i++)
		// NOTE crypt_key is in input format (4 * SHA_BUF_SIZ * SIMD_COEF_32)
		if (((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32])
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}

/* Binary comparison already exhaustive, nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return (1);
}

/* Compute HMAC-SHA1(key, salt) for all candidates:
 * inner = SHA1(ipad-block || salt), outer = SHA1(opad-block || inner).
 * The ipad/opad first-block states are cached while keys are unchanged.
 * NOTE(review): "#if _OPENMP" relies on _OPENMP's numeric value being
 * nonzero (it is a yyyymm date macro, so this works); "#ifdef" would be
 * the conventional spelling. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#if _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		if (new_keys) {
			SIMDSHA1body(&ipad[index * SHA_BUF_SIZ * 4],
			            (unsigned int*)&prep_ipad[index * BINARY_SIZE],
			            NULL, SSEi_MIXED_IN);
			SIMDSHA1body(&opad[index * SHA_BUF_SIZ * 4],
			            (unsigned int*)&prep_opad[index * BINARY_SIZE],
			            NULL, SSEi_MIXED_IN);
		}
		/* Inner hash: both salt blocks chained from the cached ipad state */
		SIMDSHA1body(cur_salt[0],
		            (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
		            (unsigned int*)&prep_ipad[index * BINARY_SIZE],
		            SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
		SIMDSHA1body(cur_salt[1],
		            (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
		            (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
		            SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT);
		/* Outer hash: inner digest chained from the cached opad state */
		SIMDSHA1body(&crypt_key[index * SHA_BUF_SIZ * 4],
		            (unsigned int*)&crypt_key[index * SHA_BUF_SIZ * 4],
		            (unsigned int*)&prep_opad[index * BINARY_SIZE],
		            SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
#else
		SHA_CTX ctx;

		if (new_keys) {
			SHA1_Init(&ipad_ctx[index]);
			SHA1_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
			SHA1_Init(&opad_ctx[index]);
			SHA1_Update(&opad_ctx[index], opad[index], PAD_SIZE);
		}

		memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
		SHA1_Update(&ctx, cur_salt.salt, cur_salt.length);
		SHA1_Final((unsigned char*) crypt_key[index], &ctx);
		memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
		SHA1_Update(&ctx, crypt_key[index], BINARY_SIZE);
		SHA1_Final((unsigned char*) crypt_key[index], &ctx);
#endif
	}
	new_keys = 0;
	return count;
}

/* Decode the 40-hex-digit digest after the last '$' into binary
 * (endian-adjusted for the SIMD layout). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

#ifdef SIMD_COEF_32
	alter_endianity(out, BINARY_SIZE);
#endif
	return out;
}

/* Decode the hex salt. For SIMD, it is broadcast into every lane of the two
 * pre-interleaved SHA-1 input blocks, with 0x80 padding and the total
 * message bit length (salt + one 64-byte key block) written in place. */
static void *get_salt(char *ciphertext)
{
	static unsigned char salt[SALT_LENGTH];
	unsigned int i, len;
#ifdef SIMD_COEF_32
	unsigned int j;
#endif

	memset(salt, 0, sizeof(salt));
	memset(&cur_salt, 0, sizeof(cur_salt));

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	len = (strrchr(ciphertext, '$') - ciphertext) / 2;

	for (i = 0; i < len; i++)
		salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
			atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];

#ifdef SIMD_COEF_32
	for (i = 0; i < len; i++)
		for (j = 0; j < SHA1_N; ++j)
			cur_salt[i>>6][GETPOS(i & 63, j)] = ((unsigned char*)salt)[i];
	for (i = 0; i < SHA1_N; ++i)
		cur_salt[len>>6][GETPOS(len & 63, i)] = 0x80;
	for (j = len + 1; j < SALT_LENGTH; ++j)
		for (i = 0; i < SHA1_N; ++i)
			cur_salt[j>>6][GETPOS(j & 63, i)] = 0;
	for (i = 0; i < SHA1_N; ++i)
		((unsigned int*)cur_salt[1])[15 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * SHA_BUF_SIZ * SIMD_COEF_32] = (len + 64) << 3;
	return &cur_salt;
#else
	cur_salt.length = len;
	memcpy(cur_salt.salt, salt, len);
	return &cur_salt;
#endif
}

/* Format descriptor wired into John's format registry. */
struct fmt_main fmt_rakp = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
#ifdef SIMD_COEF_32
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
openmp_wrapper.h
/*!
 * Copyright (c) 2017 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP

#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
#include <omp.h>

#include <LightGBM/utils/log.h>

/*!
 * \brief Number of threads the current OpenMP configuration would give a
 *        parallel region (queried by actually opening one).
 */
inline int OMP_NUM_THREADS() {
  int ret = 1;
  #pragma omp parallel
  #pragma omp master
  { ret = omp_get_num_threads(); }
  return ret;
}

/*!
 * \brief Captures the first exception thrown inside an OpenMP worker so it
 *        can be rethrown on the master thread after the parallel region
 *        (exceptions must not escape an OpenMP region).
 */
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() {
    ex_ptr_ = nullptr;
  }

  /* Rethrows any captured exception when the helper goes out of scope. */
  ~ThreadExceptionHelper() {
    ReThrow();
  }
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      std::rethrow_exception(ex_ptr_);
    }
  }
  void CaptureException() {
    // only catch first exception.
    // NOTE(review): this first check reads ex_ptr_ without holding lock_
    // (double-checked pattern without an atomic); presumably treated as a
    // benign fast-path race here — confirm this is acceptable.
    if (ex_ptr_ != nullptr) { return; }
    std::unique_lock<std::mutex> guard(lock_);
    // must check again after obtaining the lock.
    if (ex_ptr_ != nullptr) { return; }
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;  // first captured exception (null if none)
  std::mutex lock_;            // guards the write to ex_ptr_
};

/* Usage: OMP_INIT_EX() before the loop; wrap each iteration body in
 * OMP_LOOP_EX_BEGIN()/OMP_LOOP_EX_END(); OMP_THROW_EX() after the region. */
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END()                 \
  }                                       \
  catch (std::exception & ex) {           \
    Log::Warning(ex.what());              \
    omp_except_helper.CaptureException(); \
  }                                       \
  catch (...) {                           \
    omp_except_helper.CaptureException(); \
  }
#define OMP_THROW_EX() omp_except_helper.ReThrow()

#else

#ifdef _MSC_VER
#pragma warning(disable : 4068)  // disable unknown pragma warning
#endif

#ifdef __cplusplus
extern "C" {
#endif
  /** Fall here if no OPENMP support, so just
      simulate a single thread running.
      All #pragma omp should be ignored by the compiler **/
  inline void omp_set_num_threads(int) {}
  inline int omp_get_num_threads() {return 1;}
  inline int omp_get_thread_num() {return 0;}
  inline int OMP_NUM_THREADS() { return 1; }
#ifdef __cplusplus
};  // extern "C"
#endif

/* No-op exception-capture macros for the serial build. */
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()

#endif

#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
Vector.c
#include <memory.h> #include "../Vector.h" #include "../VectorGlobalConfigure.h" C_API Vector* VectorCreate() { return VectorCreateWithCapacity(VectorGetInitializationCapacity()); } C_API Vector* VectorCreateWithCapacity(UInt capacity) { if (capacity == 0) { return Null; } Vector *self = (Vector*)VectorGetMemMallocFunc()(sizeof(Vector)); if (!self) { return Null; } self->DataPtr = (VectorNode*)VectorGetMemMallocFunc()(sizeof(VectorNode) * capacity); if (!self->DataPtr) { VectorGetMemFreeFunc()(self); return Null; } self->Size = 0; self->Capacity = capacity; memset(self->DataPtr, 0, capacity * sizeof(VectorNode)); return self; } C_API void VectorDestroy(Vector** selfPtr) { VectorGetMemFreeFunc()((*selfPtr)->DataPtr); VectorGetMemFreeFunc()((*selfPtr)); *selfPtr = Null; } C_API void VectorDestroyWithFreeElements(Vector** selfPtr) { VectorClear(*selfPtr); VectorDestroy(selfPtr); } C_API UInt VectorLen(Vector *self) { return self->Size; } C_API UInt VectorCapacity(Vector *self) { return self->Capacity; } C_API Bool VectorReSetCapacity(Vector *self, UInt newCapacity) { if (newCapacity <= self->Capacity) { return False; } VectorNode* ptr = (VectorNode*)VectorGetMemMallocFunc()(sizeof(VectorNode) * newCapacity); if (ptr == Null) { return False; } memcpy_s(ptr, newCapacity * sizeof(VectorNode), self->DataPtr, self->Size * sizeof(VectorNode)); memset(&ptr[self->Size], 0, (newCapacity - self->Size) * sizeof(VectorNode)); VectorGetMemFreeFunc()(self->DataPtr); self->DataPtr = ptr; self->Capacity = newCapacity; return True; } C_API void VectorClear(Vector* self) { int i; #pragma omp parallel for for (i = 0; i < (int)self->Size; ++i) { if (self->DataPtr[i].Data) { VectorGetMemFreeFunc()(self->DataPtr[i].Data); self->DataPtr[i].Data = Null; } } self->Size = 0U; } C_API Bool VectorIsEmpty(Vector* self) { return self->Size == 0U ? 
True : False; } C_API Bool VectorAdd(Vector *self, void* data) { if (self->Size == self->Capacity) { VectorReSetCapacity(self, (UInt)(VectorGetMemoryGrowthRate() * self->Capacity + 0.5f)); } (self->DataPtr)[self->Size].Data = data; ++self->Size; return True; } C_API Bool VectorAddArray(Vector *self, void** data, UInt size) { char** ptr = (char**)data; int i; #pragma omp parallel for for (i = 0; i < (int)size; ++i) { VectorAdd(self, ptr[i]); } return True; } C_API NullUInt VectorGetIdxOfFirst(Vector *self, void* data) { NullUInt result = NullUIntCreateInvalid(); VectorNode * node = self->DataPtr; while (node < &(self->DataPtr[self->Size])) { if (node->Data == data) { result.Value = (UInt)((node - self->DataPtr) / sizeof(VectorNode)); result.Valid = True; break; } ++node; } return result; } C_API Bool VectorUpdateByIdx(Vector *self, UInt idx, void* data) { if (idx + 1 > self->Size) { return False; } self->DataPtr[idx].Data = data; return True; } C_API void* VectorGetByIdx(Vector *self, UInt idx) { if (idx + 1 > self->Size) { return Null; } return self->DataPtr[idx].Data; } C_API void* VectorRemoveByIdx(Vector *self, UInt idx) { if (idx + 1 > self->Size) { return Null; } void* tmp = self->DataPtr[idx].Data; self->DataPtr[idx].Data = self->DataPtr[--self->Size].Data; return tmp; } C_API Bool VectorRemoveAndFreeByIdx(Vector *self, UInt idx) { if (idx + 1 > self->Size) { return False; } void* tmp = self->DataPtr[idx].Data; self->DataPtr[idx].Data = self->DataPtr[--self->Size].Data; VectorGetMemFreeFunc()(tmp); return True; } C_API void VectorEachWithNoParam(Vector *self, NoParamCallback callback) { VectorNode * node = self->DataPtr; while (node < &(self->DataPtr[self->Size])) { callback(node->Data); ++node; } } C_API void VectorEachWithOneParam(Vector *self, OneParamCallback callback, void* param) { VectorNode * node = self->DataPtr; while (node < &(self->DataPtr[self->Size])) { callback(node->Data, param); ++node; } } C_API void VectorEachWithTwoParams(Vector *self, 
TwoParamsCallback callback, void* param1, void* param2) { VectorNode * node = self->DataPtr; while (node < &(self->DataPtr[self->Size])) { callback(node->Data, param1, param2); ++node; } } C_API void VectorEachWithThreeParams(Vector *self, ThreeParamsCallback callback, void* param1, void* param2, void* param3) { VectorNode * node = self->DataPtr; while (node < &(self->DataPtr[self->Size])) { callback(node->Data, param1, param2, param3); ++node; } }
vmath.h
#ifndef VMATH
#define VMATH

#include <vector>
#include <stdexcept>
#include <algorithm>

namespace vec {

// Element-wise binary operators used as the policy parameter of Calc.
struct DefaultOperators {
  struct Sum {
    template <class T>
    inline T operator()(const T &a, const T &b) const noexcept {
      return a + b;
    }
  };
  struct Rem {
    // Subtraction ("remove"), not modulo.
    template <class T>
    inline T operator()(const T &a, const T &b) const noexcept {
      return a - b;
    }
  };
  struct Mul {
    template <class T>
    inline T operator()(const T &a, const T &b) const noexcept {
      return a * b;
    }
  };
  struct Div {
    // NOTE(review): no zero-divisor check; integer b == 0 is UB here.
    template <class T>
    inline T operator()(const T &a, const T &b) const noexcept {
      return a / b;
    }
  };
};

// Applies FuncType element-wise over scalars, vector pairs, vector
// packs, or vector-scalar combinations, parallelised with OpenMP.
// NOTE(review): the `size_t` loop indices under `#pragma omp parallel
// for` require an OpenMP 3.0+ compiler (unsigned induction variables
// are rejected by OpenMP 2.x, e.g. MSVC) -- confirm target toolchains.
template <class FuncType>
struct Calc : public FuncType {
  /**
   * @brief operator () Do single binary operation
   * @param a Left value
   * @param b Right value
   * @return Result of binary operation
   */
  template <class T>
  inline auto operator()(const T &a, const T &b) const noexcept
      -> decltype(FuncType::operator()(a, b)) {
    return FuncType::operator()(a, b);
  }

  /**
   * @brief operator () Binary operation for each element in vectors.
   * @param result Output container; grown to a.size() when smaller.
   * @param a Left argument
   * @param b Right argument
   * @return True if computation done. Otherwise (vectors have not same
   * size) False
   *
   * NOTE(review): declared noexcept but resize() can throw
   * std::bad_alloc, which would call std::terminate -- confirm this is
   * intended.
   */
  template <class VectorType>
  inline bool operator()(VectorType &result, const VectorType &a,
                         const VectorType &b) const noexcept {
    if (a.size() != b.size()) return false;
    if (result.size() < a.size()) result.resize(a.size());
#pragma omp parallel for
    for (size_t i = 0; i < a.size(); ++i)
      result[i] = FuncType::operator()(a[i], b[i]);
    return true;
  }

  /**
   * @brief operator () Binary operation for each element for each vector
   * @param result Result container (also reused as the left operand of
   * each subsequent fold step)
   * @param a Left argument
   * @param b Right argument
   * @param vectors Next arguments
   * @return True if computation done. Otherwise (vectors have not same
   * size) False
   */
  template <class VectorType, class... Vectors>
  inline bool operator()(VectorType &result, const VectorType &a,
                         const VectorType &b,
                         const Vectors &... vectors) const noexcept {
    // Left-fold: result = op(a, b), then result = op(result, next), ...
    // Short-circuits on the first size mismatch.
    return operator()(result, a, b) &&
           operator()(result, result, vectors...);
  }

  template <class VectorType, class T>
  /**
   * @brief operator () Binary opertation for each element with constant
   * @param result Result container
   * @param a Left argument
   * @param b Right argument
   *
   * NOTE(review): unlike the vector-vector overload this never resizes
   * `result`; the caller must pre-size it to at least a.size() or the
   * writes are out of bounds -- confirm all call sites.
   */
  inline void operator()(VectorType &result, const VectorType &a,
                         const T &b) const noexcept {
#pragma omp parallel for
    for (size_t i = 0; i < a.size(); ++i)
      result[i] = FuncType::operator()(a[i], b);
  }
};

// Ready-made calculators: vec::sum, vec::rem, vec::mul, vec::div.
// (internal linkage: each TU gets its own copies)
const static Calc<DefaultOperators::Sum> sum;
const static Calc<DefaultOperators::Rem> rem;
const static Calc<DefaultOperators::Mul> mul;
const static Calc<DefaultOperators::Div> div;

// Sums all elements of v (plus `init`) with an OpenMP `+` reduction,
// storing the total in `result` and also returning it.
template <class T, class VectorType>
static inline T accumulate(T &result, const VectorType &v,
                           const T &init = T()) noexcept {
  T res = init;
#pragma omp parallel for reduction(+ : res)
  for (size_t i = 0; i < v.size(); ++i) res += v[i];
  result = res;
  return result;
}
}
#endif // VMATH