source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__isge_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale): GB (_AxD__isge_uint64)
// D*A function (rowscale): GB (_DxB__isge_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B GB (_bind1st__isge_uint64)
// C=scalar+B' GB (_bind1st_tran__isge_uint64)
// C=A+scalar GB (_bind2nd__isge_uint64)
// C=A'+scalar GB (_bind2nd_tran__isge_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This kernel is intentionally disabled (hence the "(none)" name): the dense
// C += A+B accum kernel is only generated for the operators listed below, and
// ISGE is not among them, so no function is emitted for this operator/type.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with no accumulator; the "+" here
// is the ISGE_UINT64 operator (cij = (aij >= bij), see GB_BINOP above).
// All numeric work is done by the included template, parallelized over
// nthreads threads.
void GB (_Cdense_ewise3_noaccum__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, using
// ISGE_UINT64 as the accumulator.  B has been sliced (B_ek_slicing) into
// B_ntasks tasks for B_nthreads threads.  Returns GrB_NO_VALUE if this
// operator/type pair was compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// the update C += B is performed by subassign method 23
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C, using ISGE_UINT64
// as the accumulator.
//
// p_bwork: pointer to the scalar b, of type uint64_t.
//
// Returns GrB_NO_VALUE if this operator/type pair was compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
//
// Fix: the original body contained a second, unreachable
// "return (GrB_SUCCESS)" (one inside the braced block and one after it);
// the inner duplicate has been removed so the function matches the
// structure of GB (_Cdense_accumB__isge_uint64) above.
GrB_Info GB (_Cdense_accumb__isge_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
// the update C += b is performed by subassign method 22
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale, where D is a diagonal matrix and the operator is
// ISGE_UINT64.  A has been sliced (A_ek_slicing) into A_ntasks tasks for
// A_nthreads threads.  Returns GrB_NO_VALUE if compiled out (GB_DISABLE).
GrB_Info GB (_AxD__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the numeric value array of C, of type uint64_t
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale, where D is a diagonal matrix and the operator is
// ISGE_UINT64.  Parallelized over nthreads threads.  Returns GrB_NO_VALUE
// if compiled out (GB_DISABLE).
GrB_Info GB (_DxB__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is the numeric value array of C, of type uint64_t
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B using the ISGE_UINT64
// operator.  When is_eWiseUnion is true, alpha_scalar/beta_scalar are read
// from the input pointers and used in place of implicit entries; otherwise
// they are left uninitialized here and presumably not read by the template
// (confirm in GB_add_template.c).  The C_to_* maps, TaskList, C_ntasks, and
// C_nthreads describe the parallel schedule computed by the caller.
GrB_Info GB (_AaddB__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion: get the alpha and beta scalars, of type uint64_t
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is
// sparse or hypersparse, using the ISGE_UINT64 operator.  The parallel
// schedule (TaskList, C_ntasks, C_nthreads) and the C_to_* maps are computed
// by the caller; all numeric work is done by GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__isge_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full, using the ISGE_UINT64 operator.  flipxy requests fmult(y,x)
// instead of fmult(x,y); for this operator GB_BINOP_FLIP is defined as 0
// above, so only the unflipped #else branch below is compiled.
GrB_Info GB (_AemultB_02__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, using the ISGE_UINT64 operator.  M has been sliced
// (M_ek_slicing) into M_ntasks tasks for M_nthreads threads.
GrB_Info GB (_AemultB_04__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult (bitmap method): C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C
// is bitmap, using the ISGE_UINT64 operator.  If M is sparse/hyper it has
// been sliced (M_ek_slicing) into M_ntasks tasks for M_nthreads threads;
// the main work is parallelized over C_nthreads threads.
GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = op (x, Bx [p]): apply ISGE_UINT64 with the scalar x bound to the
// first argument.  Bb is the bitmap of B (or NULL, meaning all bnz entries
// are present); entries absent from the bitmap are skipped.  Parallelized
// over nthreads threads.  Returns GrB_NO_VALUE if compiled out (GB_DISABLE).
GrB_Info GB (_bind1st__isge_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // apply the operator only to entries present in the bitmap
        if (GBB (Bb, p))
        {
            uint64_t bval = GBX (Bx, p, false) ;
            Cx [p] = (x >= bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = op (Ax [p], y): apply ISGE_UINT64 with the scalar y bound to the
// second argument.  Ab is the bitmap of A (or NULL, meaning all anz entries
// are present); entries absent from the bitmap are skipped.  Parallelized
// over nthreads threads.  Returns GrB_NO_VALUE if compiled out (GB_DISABLE).
GrB_Info GB (_bind2nd__isge_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // apply the operator only to entries present in the bitmap
        if (GBB (Ab, p))
        {
            uint64_t aval = GBX (Ax, p, false) ;
            Cx [p] = (aval >= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to compute each entry of C.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply ISGE_UINT64 with the scalar x bound
// to the first argument.  Workspaces/A_slice describe the parallel transpose
// schedule (nworkspaces, nthreads) used by GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__isge_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE to its file-wide definition (also uint64_t here)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__isge_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Example_target_offload_control.1.c | /*
* @@name: target_offload_control.1.c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <ctype.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>   /* strncasecmp (POSIX) */
typedef enum offload_policy
{MANDATORY, DISABLED, DEFAULT, UNKNOWN, NOTSET} offload_policy_t;

/*
 * Parse the OMP_TARGET_OFFLOAD environment variable.
 *
 * Returns NOTSET if the variable is absent, MANDATORY/DISABLED/DEFAULT if
 * the trimmed value matches one of those keywords case-insensitively (and
 * exactly -- no extra characters), or UNKNOWN for any other value.
 *
 * Fixes vs. the original:
 *  - the trimmed length was computed with an (int) cast but stored in a
 *    size_t; it is now computed directly as size_t.
 *  - isspace() is given an unsigned char, avoiding undefined behavior when
 *    plain char is signed and holds a negative value.
 */
offload_policy_t get_offload_policy()
{
   char *env, *end;
   size_t n;

   env = getenv("OMP_TARGET_OFFLOAD");
   if (env == NULL) return NOTSET;

   /* Trim leading and trailing whitespace. */
   end = env + strlen(env);
   while (*env && isspace((unsigned char)*env)) env++;
   while (end != env && isspace((unsigned char)*(end - 1))) end--;
   n = (size_t)(end - env);

   /* Match ONLY the keyword -- nothing more, case insensitive. */
   if      (n == 9 && !strncasecmp(env, "MANDATORY", n)) return MANDATORY;
   else if (n == 8 && !strncasecmp(env, "DISABLED",  n)) return DISABLED;
   else if (n == 7 && !strncasecmp(env, "DEFAULT",   n)) return DEFAULT;
   else return UNKNOWN;
}
// Demonstrates the OMP_TARGET_OFFLOAD policy: requests a target region on a
// deliberately unavailable device number and reports whether the region ran
// on the initial (host) device.  Under the MANDATORY policy an OpenMP 5.0
// implementation must terminate the program when the device is unavailable,
// so reaching the final checks means the policy was ignored.
int main()
{
int i;
// NOTE(review): 'i' is unused in this visible code.
int device_num, on_init_dev;
// get policy from OMP_TARGET_OFFLOAD variable
offload_policy_t policy = get_offload_policy();
// OMP_TARGET_OFFLOAD semantics require OpenMP 5.0 (version macro 201811).
if(_OPENMP< 201811)
{
printf("Warning: OMP_TARGET_OFFLOAD NOT supported by VER. %d\n",_OPENMP );
printf("         If OMP_TARGET_OFFLOAD is set, it will be ignored.\n");
}
// Set target device number to an unavailable
// device to test offload policy.
device_num = omp_get_num_devices() + 1;
// Policy:
printf("OMP_TARGET_OFFLOAD Policy: ");
if (policy==MANDATORY) printf("MANDATORY-Terminate if dev. not avail\n");
else if(policy==DISABLED ) printf("DISABLED -(if supported) Only on Host\n");
else if(policy==DEFAULT ) printf("DEFAULT  -On host if device not avail\n");
else if(policy==UNKNOWN ) printf("OMP_TARGET_OFFLOAD has unknown value\n" );
else if(policy==NOTSET  ) printf("OMP_TARGET_OFFLOAD not set\n"           );
on_init_dev = 1;
// device# out of range--not supported
// With MANDATORY this target region must terminate the program; otherwise
// it falls back to the host and on_init_dev is set to 1 (true).
#pragma omp target device(device_num) map(tofrom: on_init_dev)
on_init_dev=omp_is_initial_device();
// Executing past the target region under MANDATORY (with 5.0 semantics
// available) means the implementation ignored the policy.
if (policy == MANDATORY && _OPENMP >= 201811)
printf("ERROR: OpenMP 5.0 implementation ignored MANDATORY policy.\n");
printf("Target region executed on init dev %s\n", on_init_dev ? "TRUE":"FALSE");
return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
class InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class AttributeList;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class ExternalSemaSource;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPDeclareReductionDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// Which kind of pointer declarator we saw.
/// NOTE(review): the encoding of this value is not visible in this chunk;
/// confirm its meaning at the assignment sites.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache, keyed by file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: the file asked about last time is being asked about again.
    if (file == Cache.File)
      return Cache.Nullability;

    // Cache miss.  If the cache holds a real entry (an invalid FileID means
    // it has never been populated), write that entry back into the map.
    if (!Cache.File.isInvalid())
      Map[Cache.File] = Cache.Nullability;

    // Pull the requested entry into the cache.  Map[file] default-constructs
    // a FileNullability for a file we have not seen before.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///\brief Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///\brief Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
// We are about to link these. It is now safe to compute the linkage of
// the new decl. If the new decl has external linkage, we will
// link it with the hidden decl (which also has external linkage) and
// it will keep having external linkage. If it has internal linkage, we
// will not link it. Since it has no previous decls, it will remain
// with internal linkage.
return isVisible(Old) || New->isExternallyVisible();
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// \brief Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// \brief Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// \brief Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
/// PackContext - Manages the stack for \#pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// \brief Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
enum PragmaVtorDispKind {
PVDK_Push, ///< #pragma vtordisp(push, mode)
PVDK_Set, ///< #pragma vtordisp(mode)
PVDK_Pop, ///< #pragma vtordisp(pop)
PVDK_Reset ///< #pragma vtordisp()
};
enum PragmaMsStackAction {
PSK_Reset, // #pragma ()
PSK_Set, // #pragma ("name")
PSK_Push, // #pragma (push[, id])
PSK_Push_Set, // #pragma (push[, id], "name")
PSK_Pop, // #pragma (pop[, id])
PSK_Pop_Set, // #pragma (pop[, id], "name")
};
/// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
///
/// The stack always has at least one element in it.
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
Slot(llvm::StringRef StackSlotLabel,
ValueType Value,
SourceLocation PragmaLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
explicit PragmaStack(const ValueType &Value)
: CurrentValue(Value) {}
SmallVector<Slot, 2> Stack;
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// \brief Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// ExprNeedsCleanups - True if the current evaluation context
/// requires cleanups to be run at its conclusion.
bool ExprNeedsCleanups;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// \brief Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
/// \brief Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
///
/// This array is never empty. Clients should ignore the first
/// element, which is used to cache a single FunctionScopeInfo
/// that's used to parse every top-level function.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType;
/// \brief Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// \brief Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// \brief Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// \brief Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// \brief All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// \brief The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// \brief All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// \brief All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// \brief All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// \brief Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Install the parser callbacks used to lazily parse templated functions,
/// together with the opaque parser object handed back to those callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  // The three stores are independent; record the opaque parser first so
  // the callbacks are never installed without their context.
  OpaqueParser = P;
  LateTemplateParserCleanup = LTPCleanup;
  LateTemplateParser = LTP;
}
class DelayedDiagnostics;
/// Opaque saved state for DelayedDiagnostics; only that class may read
/// or write the saved pool (via the friend declaration below).
class DelayedDiagnosticsState {
// The pool that was active before a push; restored by the matching pop.
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// Encapsulates the logic for delaying diagnostics while parsing and
/// other processing, collecting them into pools for later emission.
class DelayedDiagnostics {
  /// The pool currently collecting delayed diagnostics, or null when
  /// diagnostics should be emitted immediately.
  sema::DelayedDiagnosticPool *CurPool;
public:
  DelayedDiagnostics() : CurPool(nullptr) {}
  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
  /// Whether diagnostics are currently being delayed into a pool.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }
  /// Returns the pool currently collecting delayed diagnostics, if any.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }
  /// Enter a scope in which access and deprecation diagnostics are
  /// collected into \p pool instead of being emitted.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }
  /// Leave a delayed-diagnostic scope previously entered with push(),
  /// emitting none of the collected diagnostics.  This is purely the
  /// bookkeeping half of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }
  /// Enter a scope in which access and deprecation diagnostics are
  /// emitted immediately rather than delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }
  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// \brief RAII object performing the Sema state changes required to
/// synthesize a function body: it pushes the given declaration context,
/// a fresh function scope, and a potentially-evaluated expression
/// context, and unwinds all of them on destruction.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedDeclContext;
public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
    : S(S), SavedDeclContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
  }
  ~SynthesizedFunctionScope() {
    // Unwind in reverse order of the pushes in the constructor.
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// \brief The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// \brief The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// \brief The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// \brief Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// \brief The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// \brief The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// \brief Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// \brief Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// \brief The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// \brief The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// \brief Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// \brief The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// \brief id<NSCopying> type.
QualType QIDNSCopying;
/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// \brief counter for internal MS Asm label names.
unsigned MSAsmLabelNameCounter;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// \brief The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// \brief The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// \brief The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// \brief The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// \brief The expression evaluation context.
ExpressionEvaluationContext Context;
/// \brief Whether the enclosing context needed a cleanup.
bool ParentNeedsCleanups;
/// \brief Whether we are in a decltype expression.
bool IsDecltype;
/// \brief The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// \brief The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
// NOTE(review): presumably the set of maybe-ODR-used expressions saved on
// entry to this context — confirm against Sema's context push/pop code.
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// \brief The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// \brief The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// \brief The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;
/// \brief If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// \brief If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
// Constructor; MangleNumbering is left null here and is created lazily
// via getMangleNumberingContext().  NumTypos always starts at zero.
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
bool ParentNeedsCleanups,
Decl *ManglingContextDecl,
bool IsDecltype)
: Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }
/// \brief Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
/// \brief Whether this context is unevaluated (including the
/// abstract-field variant used for MS-style inline assembly).
bool isUnevaluated() const {
return Context == Unevaluated || Context == UnevaluatedAbstract;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
// Outcome of overload resolution for the special member function.
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// Packs the resolved method pointer with a 2-bit Kind in the low bits.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
/// \brief A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
void ReadMethodPool(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// \brief Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// RAII object that captures the FP_CONTRACT state on entry to a
/// compound statement and restores it on exit.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
  ~FPContractStateRAII() {
    S.FPFeatures.fp_contract = OldFPContractState;
  }
private:
  Sema &S;
  bool OldFPContractState : 1; // single flag, stored as a bit-field
};
/// RAII object that optionally snapshots the vtordisp mode stack on
/// entry to a C++ method body and restores it on exit.  When
/// \p ShouldSaveAndRestore is false, construction and destruction are
/// both no-ops.
class VtorDispStackRAII {
  Sema &S;
  bool ShouldSaveAndRestore;
  SmallVector<MSVtorDispAttr::Mode, 2> OldVtorDispStack;

public:
  VtorDispStackRAII(Sema &S, bool ShouldSaveAndRestore)
      : S(S), ShouldSaveAndRestore(ShouldSaveAndRestore), OldVtorDispStack() {
    if (ShouldSaveAndRestore)
      OldVtorDispStack = S.VtorDispModeStack;
  }
  ~VtorDispStackRAII() {
    if (ShouldSaveAndRestore)
      S.VtorDispModeStack = OldVtorDispStack;
  }
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Simple accessors for the options, engines, and consumers this Sema
// instance was constructed with.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;   // Sema instance that will ultimately emit the diagnostic
unsigned DiagID; // ID of the diagnostic being built
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
// Stream through the base class, but return the derived reference so
// chained '<<' keeps the Sema-aware builder type.
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// \brief Emit a diagnostic with ID \p DiagID at \p Loc, wrapped so
/// that any template-instantiation notes are attached when flushed.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
/// \brief Retrieve the innermost function scope.  Callers must ensure a
/// function scope has been pushed (FunctionScopes must be non-empty,
/// since back() on an empty vector is invalid).
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.back();
}
/// \brief Retrieve the innermost enclosing function-like scope that is
/// not a block scope, or null if there is none.
sema::FunctionScopeInfo *getEnclosingFunction() const {
  // Walk the scope stack from innermost to outermost, skipping blocks.
  // (An empty stack simply never enters the loop.)
  for (unsigned I = FunctionScopes.size(); I != 0; --I) {
    sema::FunctionScopeInfo *FSI = FunctionScopes[I - 1];
    if (!isa<sema::BlockScopeInfo>(FSI))
      return FSI;
  }
  return nullptr;
}
/// Record a use of a weak object within the current function, unless we
/// are in an unevaluated context where no code will be generated.
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
  if (isUnevaluatedContext())
    return;
  getCurFunction()->recordUseOfWeak(E, IsRead);
}
void PushCompoundScope();
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// \brief Retrieve the current lambda scope info, if any.
sema::LambdaScopeInfo *getCurLambda();
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
/// (the WeakTopLevelDecl vector declared earlier in this class).
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// \brief Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildPipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// \brief Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc,
bool *MissingExceptionSpecification = nullptr,
bool *MissingEmptyExceptionSpecification = nullptr,
bool AllowNoexceptAllMatchWithNoSpec = false,
bool IsOperatorNew = false);
bool CheckExceptionSpecSubset(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Superset, SourceLocation SuperLoc,
const FunctionProtoType *Subset, SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
const FunctionProtoType *Target, SourceLocation TargetLoc,
const FunctionProtoType *Source, SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
// Emit the diagnostic for type T at Loc; implemented by subclasses such
// as BoundTypeDiagnoser below.
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
// getPrintable - normalize values of assorted types into forms that can
// be streamed into a SemaDiagnosticBuilder (used by BoundTypeDiagnoser).
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// TypeDiagnoser that binds a diagnostic ID plus extra arguments, which
/// are streamed into the diagnostic ahead of the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
// Arguments are stored by reference; callers keep them alive for the
// (short) lifetime of the diagnoser.
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
// The offending type is always appended last.
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
VisibleModuleSet VisibleModules;
llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;
Module *CachedFakeTopLevelModule;
public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity);
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
/// \brief Determine whether module \p M is in the current visible set.
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
// Fast path: non-hidden declarations are always visible; only fall back
// to the slow module-visibility walk for hidden ones.
return !D->isHidden() || isVisibleSlow(D);
}
bool hasVisibleMergedDefinition(NamedDecl *Def);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
// Return true if T is complete at Loc, without emitting any diagnostic
// on failure (a null TypeDiagnoser suppresses diagnostics).
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
// Require T to be complete at Loc, diagnosing with DiagID and the given
// extra diagnostic arguments if it is not.  Returns true on failure.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  // Bind the diagnostic arguments, then defer to the TypeDiagnoser overload.
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireCompleteType(Loc, T, BTD);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
// Require the type of expression E to be complete, diagnosing with DiagID
// and the given extra arguments if it is not.  Returns true on failure.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  // Bind the diagnostic arguments, then defer to the TypeDiagnoser overload.
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireCompleteExprType(E, BTD);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
// Require T to be a literal type at Loc, diagnosing with DiagID and the
// given extra arguments if it is not.  Returns true on failure.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  // Bind the diagnostic arguments, then defer to the TypeDiagnoser overload.
  BoundTypeDiagnoser<Ts...> BTD(DiagID, Args...);
  return RequireLiteralType(Loc, T, BTD);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
// Records whether the parser should skip an upcoming definition body, and
// if so which previous declaration made it redundant (see
// CheckForFunctionRedefinition / shouldSkipAnonEnumBody, which take or
// return this struct).
struct SkipBodyInfo {
SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
bool ShouldSkip;      // True if the body should be skipped.
NamedDecl *Previous;  // Presumably the prior declaration that already
                      // covers this definition — confirm at call sites.
};
/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
/// function to pin them on. ActOnFunctionDeclarator reads this list and patches
/// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope;
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool AllowClassTemplates = false);
/// \brief For compatibility with MSVC, we delay parsing of some default
/// template type arguments until instantiation time. Emits a warning and
/// returns a synthesized DependentNameType that isn't really dependent on any
/// other template arguments.
ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II,
SourceLocation NameLoc);
/// \brief Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,              // Lookup produced no usable classification.
NC_Error,                // An error was diagnosed during classification.
NC_Keyword,              // The identifier is actually a keyword.
NC_Type,                 // The name denotes a type.
NC_Expression,           // The name forms an expression.
NC_NestedNameSpecifier,  // The name begins a nested-name-specifier.
NC_TypeTemplate,         // The name is a type (class/alias) template.
NC_VarTemplate,          // The name is a variable template.
NC_FunctionTemplate      // The name is a function template.
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
const IdentifierInfo *Keyword;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword)
: Kind(NC_Keyword), Keyword(Keyword) { }
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// \brief Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
void CheckShadow(Scope *S, VarDecl *D);
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
void CheckCompleteVariableDeclaration(VarDecl *var);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsExplicitSpecialization);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
bool TypeMayContainAuto);
void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
bool TypeMayContainAuto = true);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
// Null-safe test for whether D is an Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// \brief Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// \brief Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
ParmVarDecl * const *End);
/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
ParmVarDecl * const *End,
QualType ReturnTy,
NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
AttributeList *AttrList,
SourceLocation SemiLoc);
/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// \brief Check if module import may be found in the current context,
/// emit error if not.
void diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc);
/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,    // A declaration of the entity is not visible.
Definition,     // The entity's definition is not visible.
DefaultArgument // A default argument is not visible.
};
/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
/// \brief Retrieve a suitable printing policy.
// Delegates to the static overload using this Sema's ASTContext and
// Preprocessor.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
// The syntactic context in which a tag (struct/union/class/enum) name
// appears, as passed to ActOnTag and related entry points.
enum TagUseKind {
TUK_Reference,   // Reference to a tag:  'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend       // Friend declaration:  'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
AttributeList *MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields,
SourceLocation LBrac, SourceLocation RBrac,
AttributeList *AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceLocation RBraceLoc);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy,
bool EnumUnderlyingIsImplicit,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
AttributeList *Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
SourceLocation RBraceLoc, Decl *EnumDecl,
ArrayRef<Decl *> Elements,
Scope *S, AttributeList *Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// \brief Don't merge availability attributes at all.
AMK_None,
/// \brief Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// \brief Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// \brief Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,          // Simple assignment: 'a = b'.
AA_Passing,            // Passing a value as a function argument.
AA_Returning,          // Returning a value from a function.
AA_Converting,         // Performing a conversion.
AA_Initializing,       // Initializing a newly-declared entity.
AA_Sending,            // Sending an Objective-C message argument.
AA_Casting,            // An explicit cast.
AA_Passing_CFAudited   // NOTE(review): presumably passing to a
                       // CF-audited API — confirm at use sites.
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
/// \brief Checks availability of the function depending on the current
/// function context.Inside an unavailable function,unavailability is ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
// Passed to the CheckConvertedConstantExpression overloads (below) to select
// which converted-constant-expression context is being checked.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// When true, suppress diagnostics from this converter.
// NOTE(review): the reader of these flags is not visible in this chunk
// (presumably PerformContextualImplicitConversion) -- confirm there.
bool Suppress;
// When true, suppress the "picked a conversion function" diagnostic
// (diagnoseConversion). NOTE(review): assumption from the name; verify.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// \brief Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// \brief Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// \brief Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// \brief Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
// Virtual destructor so implementations may be destroyed through a
// pointer to this base class.
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped enumeration types are accepted by match().
// NOTE(review): match() is only declared here; semantics presumed from the
// name and the comment on match() -- confirm in its definition.
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
// For this diagnoser a failed match always means "not an integral or
// enumeration type", so forward to the specialized hook.
return diagnoseNotInt(S, Loc, T);
}
/// \brief Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
// Classification of an Objective-C subscript expression; returned by
// CheckSubscriptingKind (below).
enum ObjCSubscriptKind {
OS_Array, ///< Array-style subscript (presumably an integral index -- confirm).
OS_Dictionary, ///< Dictionary-style subscript (presumably an object key -- confirm).
OS_Error ///< The subscript kind could not be determined.
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
// Classification of an Objective-C literal expression; returned by
// CheckLiteralKind (below).
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
// Deliberately placed after the other literal kinds; diagnostics logic
// depends on this ordering (see the note above this enum).
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success, ///< The begin/end call was built successfully.
FRS_NoViableFunction, ///< Overload resolution found no viable function.
FRS_DiagnosticIssued ///< A diagnostic has been issued -- NOTE(review): presumed from the name.
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param,
ParmVarDecl *const *ParamEnd,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// @brief Describes the kind of name lookup to perform.
// (Passed as the LookupNameKind/LookupKind parameter of the lookup and
// typo-correction entry points declared below.)
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// \brief Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// \brief Look up any declaration with any name.
LookupAnyName
};
/// \brief Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
// (Used as the default-argument type of LookupSingleName and LookupProtocol
// below.)
enum RedeclarationKind {
/// \brief The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// \brief The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists.
ForRedeclaration
};
/// \brief The possible outcomes of name lookup for a literal operator.
// (Returned by LookupLiteralOperator, declared below.)
enum LiteralOperatorLookupResult {
/// \brief The lookup resulted in an error.
LOLR_Error,
/// \brief The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// \brief The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// \brief The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
// Per-TypoExpr correction state; stored in DelayedTypos (below).
struct TypoExprState {
// Owns the candidate stream for this typo; unique_ptr makes the struct
// move-only, hence the explicit move operations below.
std::unique_ptr<TypoCorrectionConsumer> Consumer;
// Callback invoked with the chosen TypoCorrection to emit diagnostics
// (see the TypoDiagnosticGenerator typedef above).
TypoDiagnosticGenerator DiagHandler;
// Callback that rebuilds an expression from a chosen correction
// (see the TypoRecoveryCallback typedef above).
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT;
TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT;
};
/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// \brief Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
DeclAccessPair Operator,
QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// \brief Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
// Convenience overload: correct typos in E with a custom filter and no
// VarDecl to exclude (forwards nullptr as InitDecl).
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
// ExprResult wrapper around CorrectDelayedTyposInExpr(Expr *, ...).
// Invalid results are propagated unchanged; otherwise the unwrapped Expr is
// delegated to the primary overload above.
//
// Fix: forward InitDecl to the delegate. The original body called
// CorrectDelayedTyposInExpr(ER.get(), Filter), which binds Filter and lets
// InitDecl default to nullptr in the callee -- so the "avoid this VarDecl"
// exclusion was silently dropped for every caller of this overload.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
// Convenience overload: correct typos in an ExprResult with a custom filter
// and no VarDecl to exclude (forwards nullptr as InitDecl).
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const AttributeList *AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive);
/// \brief Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl *IDecl);
void DefaultSynthesizeProperties(Scope *S, Decl *D);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
Selector SetterSel,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
// Strictness of type comparison for MatchTwoMethodDeclarations (below).
enum MethodMatchStrategy {
MMS_loose, ///< Tolerant matching -- NOTE(review): exact rules live in the definition.
MMS_strict ///< Exact matching; the default for MatchTwoMethodDeclarations.
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// \brief - Returns instance or factory methods in the global method pool
/// for the given selector. If no such method exists, or only one method is
/// found, the function returns false; otherwise, it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool instance);
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R,
bool receiverIdOrClass);
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// \brief - Returns the method which best matches the given argument list,
/// or nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance);
/// \brief Record a failed typo-correction attempt and return an empty
/// correction.
///
/// When \p RecordFailure is true (the default), the failure of \p Typo at
/// \p TypoLoc is remembered in TypoCorrectionFailures so it is not retried.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but registers a factory (class) method.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for the
/// given selector, warning if there are multiple signatures in the pool.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory (class) method for
/// the given selector, warning if there are multiple signatures in the pool.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A thin wrapper carrying an expression that has been finished as a
/// full-expression (see Sema::MakeFullExpr). Holds a plain Expr*.
class FullExprArg {
public:
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
/// Finish \p Arg as a full-expression, using the expression's own location
/// (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}
/// Finish \p Arg as a full-expression at location \p CC and wrap the result.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult Full = ActOnFinishFullExpr(Arg, CC);
  return FullExprArg(Full.get());
}
/// Finish \p Arg as a discarded-value full-expression (its value is unused).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  ExprResult FE = ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// \brief A RAII object that enters the scope of a compound statement on
/// construction and leaves it on destruction.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S) : S(S) { S.ActOnStartOfCompoundStmt(); }
  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;

  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Prevent the destructor from popping the function scope.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
SourceLocation DotDotDotLoc, Expr *RHSVal,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
StmtResult ActOnIfStmt(SourceLocation IfLoc,
FullExprArg CondVal, Decl *CondVar,
Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Expr *Cond,
Decl *CondVar);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
FullExprArg Cond,
Decl *CondVar, Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc,
SourceLocation CondLParen, Expr *Cond,
SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First, FullExprArg Second,
Decl *SecondVar,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The mode in which a C++ for-range statement is being built
/// (see ActOnCXXForRangeStmt / BuildCXXForRangeStmt).
enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *BeginEndDecl,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
bool AllowFunctionParameters);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
bool AllowFunctionParameters);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
llvm::InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
llvm::InlineAsmIdentifierInfo &Info,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// \brief Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Begin a parsing-declaration region whose delayed diagnostics are
/// collected in \p pool; pair with PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  ParsingDeclState State = DelayedDiagnostics.push(pool);
  return State;
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Enter a class-parsing region with undelayed diagnostics; pair with
/// PopParsingClass.
ParsingClassState PushParsingClass() {
  ParsingClassState State = DelayedDiagnostics.pushUndelayed();
  return State;
}
/// Leave the class-parsing region previously entered by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };
void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
bool ObjCPropertyAccess=false);
void NoteDeletedFunction(FunctionDecl *FD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
ReuseLambdaContextDecl_t,
bool IsDecltype = false);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool OdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
/// Kinds of variable capture, used by tryCaptureVariable: implicit capture
/// (block or lambda), or an explicit by-value / by-reference lambda capture.
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// \brief Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;          // scope in which the member access appeared
  UnqualifiedId &Id; // the member name as written
  Decl *ObjCImpDecl; // NOTE(review): presumably the enclosing ObjC
                     // @implementation decl (matches the ObjCImpDecl
                     // parameter of ActOnMemberAccessExpr) — confirm
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
// One component of a __builtin_offsetof designator: either a member
// designator (.ident) or an array subscript ([expr]).
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd; // source range of this component
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo; // active when !isBrackets (.ident form)
    Expr *E;                   // active when isBrackets ([expr] form)
  } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// \brief The symbol exists.
IER_Exists,
/// \brief The symbol does not exist.
IER_DoesNotExist,
/// \brief The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// \brief An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc,
IdentifierInfo *Ident,
SourceLocation LBrace,
AttributeList *AttrList,
UsingDirectiveDecl * &UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
CXXRecordDecl *getStdBadAlloc() const;
/// \brief Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// \brief Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const CXXConstructorDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
SourceLocation NamespcLoc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
AttributeList *AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
AttributeList *AttrList,
bool IsInstantiation,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
Decl *ActOnUsingDeclaration(Scope *CurScope,
AccessSpecifier AS,
bool HasUsingKeyword,
SourceLocation UsingLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
AttributeList *AttrList,
bool HasTypenameKeyword,
SourceLocation TypenameLoc);
Decl *ActOnAliasDeclaration(Scope *CurScope,
AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc,
UnqualifiedId &Name,
AttributeList *AttrList,
TypeResult Type,
Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; // dedup set mirroring Exceptions
SmallVector<QualType, 4> Exceptions; // collected throw(...) types, in order seen
// Drop all collected exception types (both the ordered list and the
// dedup set).
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification the dialect supports:
// noexcept in C++11, throw() (EST_DynamicNone) otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// \brief Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(ComputedEST != EST_ComputedNoexcept &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// \brief The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// \brief The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// \brief Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// \brief Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// \brief Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
///   The exception-specification is noexcept(false) if the set of
///   potential exceptions of the special member function contains "any"
// Materialize noexcept(false) as a computed-noexcept spec whose
// expression is the literal 'false'.
ESI.Type = EST_ComputedNoexcept;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);
/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
bool Diagnose = false);
/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// \brief Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// \brief Declare all inheriting constructors for the given class.
///
/// \param ClassDecl The class declaration into which the inheriting
/// constructors will be added.
void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl);
/// \brief Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// \brief Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// \brief When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// \brief RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
// Saved value of S.CXXThisTypeOverride; presumably restored by the
// destructor (defined out of line) when the scope ends.
QualType OldCXXThisTypeOverride;
bool Enabled; // when false, the scope has no effect
public:
/// \brief Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// \brief Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr);
/// \brief Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer,
bool TypeMayContainAuto = true);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType, bool IsArray,
MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete);
bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
DeclarationName Name, MultiExprArg Args,
DeclContext *Ctx,
bool AllowMissing, FunctionDecl *&Operator,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
QualType Param1,
QualType Param2 = QualType(),
bool addRestrictAttr = false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
DeclarationName Name);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
bool ConvertToBoolean);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
/// Finish the current full-expression, defaulting the context location to
/// the expression's own location (or an invalid location when the
/// expression is null).
ExprResult ActOnFinishFullExpr(Expr *Expr) {
  SourceLocation Loc;
  if (Expr)
    Loc = Expr->getExprLoc();
  return ActOnFinishFullExpr(Expr, Loc);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// \brief The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// \brief The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
SourceLocation IdLoc,
IdentifierInfo &II,
ParsedType ObjectType);
bool BuildCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
QualType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param Identifier The identifier preceding the '::'.
///
/// \param IdentifierLoc The location of the identifier.
///
/// \param CCLoc The location of the '::'.
///
/// \param ObjectType The type of the object, if we're parsing
/// nested-name-specifier in a member access expression.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation CCLoc,
ParsedType ObjectType,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr);
ExprResult ActOnDecltypeExpression(Expr *E);
/// \brief The parser has parsed a nested-name-specifier that begins with a
/// decltype-specifier, e.g. 'decltype(...)::'.
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
/// NOTE(review): presumably returns true when 'Identifier::' could only be
/// valid as a nested-name-specifier (and is otherwise an error) -- confirm
/// against the parser's call sites.
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo &Identifier,
SourceLocation IdentifierLoc,
SourceLocation ColonLoc,
ParsedType ObjectType,
bool EnteringContext);
/// \brief The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// \brief Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// \brief Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
/// \brief Determine whether the scope named by \p SS should be entered while
/// parsing the declarator-id; presumably gates calls to
/// ActOnCXXEnterDeclaratorScope below -- confirm against the parser.
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params);
/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
///
/// Thin wrapper over buildLambdaInitCaptureInitialization(): every init
/// kind other than copy-initialization is treated as direct-initialization.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
    LambdaCaptureInitKind InitKind, Expr *&Init) {
  const bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType =
      buildLambdaInitCaptureInitialization(Loc, ByRef, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
/// \brief Analyze an init-capture's initializer \p Init and compute the
/// type of the capture variable (see actOnLambdaInitCaptureInitialization,
/// which forwards here).
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
/// \brief Build the block literal produced by converting the lambda \p Src
/// through the conversion function \p Conv.
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
/// BuildObjCStringLiteral - Build the AST node for an Objective-C string
/// literal ('@"..."').
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
/// ActOnObjCBoolLiteral - Build the expression for an Objective-C boolean
/// literal ('\@YES' / '\@NO'); \p Value carries the literal's truth value.
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
/// BuildObjCArrayLiteral - Build an Objective-C array literal from its
/// element expressions.
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
/// BuildObjCSubscriptExpression - Build an Objective-C subscript expression
/// that reads/writes via the given getter/setter methods.
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
/// BuildObjCDictionaryLiteral - Build an Objective-C dictionary literal from
/// its key/value elements.
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
/// BuildObjCEncodeExpression - Build an \@encode expression for the given
/// (already type-checked) type.
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
/// Build a member call that invokes conversion function \p Method on \p Exp.
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
/// ParseObjCEncodeExpression - Parser callback for \@encode; converts the
/// parsed type before building the expression.
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
/// ActOnStartLinkageSpecification - Parsed the start of a linkage
/// specification, e.g. 'extern "C" {'.
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
/// ActOnFinishLinkageSpecification - Complete the linkage specification
/// started by ActOnStartLinkageSpecification.
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
/// Determine whether \p II names the class currently being defined.
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
bool ActOnAccessSpecifier(AccessSpecifier Access,
SourceLocation ASLoc,
SourceLocation ColonLoc,
AttributeList *Attrs = nullptr);
/// ActOnCXXMemberDeclarator - Parsed a declarator for a C++ class member.
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
/// ActOnMemInitializer - Handle a C++ member initializer whose initializer
/// is a parenthesized list of arguments.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
/// Overload taking the initializer as a single expression (\p InitList,
/// e.g. a braced-init-list) rather than a parenthesized argument list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
/// BuildMemInitializer - Common implementation for the two
/// ActOnMemInitializer overloads above.
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
/// Add any implicitly-declared special member functions to the given class.
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
/// ActOnMemInitializers - Parsed a complete constructor
/// mem-initializer list.
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// \brief Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
Decl *TagDecl,
SourceLocation LBrac,
SourceLocation RBrac,
AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
/// Record \p FD as a late-parsed template, stashing its tokens in \p Toks
/// for later parsing.
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
/// ActOnStaticAssertDeclaration - Parsed a static_assert declaration.
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
/// BuildStaticAssertDeclaration - Build a static_assert declaration;
/// \p Failed records that the condition already evaluated false
/// (presumably during instantiation -- confirm).
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
/// Check a constructor declarator, computing its adjusted type.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
/// Check a destructor declarator, computing its adjusted type.
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
/// Check a conversion-function declarator; may adjust \p R.
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a parsed base specifier and build the
/// corresponding CXXBaseSpecifier (used by ActOnBaseSpecifier below).
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
/// ActOnBaseSpecifier - Parsed a base specifier.
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
/// Overload that also records the base-class paths found.
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
// NOTE(review): "AmbigiousBaseConvID" is a typo for "Ambiguous"; C++
// parameter names are not part of the interface, so renaming would be
// safe, but it is left untouched here.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The outcome of an access-control check.
enum AccessResult {
AR_accessible, ///< The access is permitted.
AR_inaccessible, ///< The access is not permitted.
AR_dependent, ///< The result depends on template parameters; presumably
///< decided at instantiation time -- confirm.
AR_delayed ///< The check has been deferred (see HandleDelayedAccessCheck).
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
bool IsCopyBindingRefToTemp = false);
/// Overload that reports failures with the supplied diagnostic.
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
const InitializedEntity &Entity,
AccessSpecifier Access,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// \brief Selects the kind of entity being diagnosed as having an abstract
/// class type (used with RequireNonAbstractType / DiagnoseAbstractType).
enum AbstractDiagSelID {
AbstractNone = -1, ///< No specific entity kind.
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
/// Require that \p T not be an abstract class type, diagnosing via
/// \p Diagnoser when it is.
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// \brief Convenience overload: require that \p T not be an abstract class
/// type, reporting failures with diagnostic \p DiagID formatted from
/// \p Args.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diag(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diag);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
/// Remove from the lookup result \p R any declarations that are not
/// usable as template names.
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization);
/// isTemplateName - Determine whether \p Name refers to a template,
/// returning the template in \p Template on success.
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
/// ActOnTypeParameter - Parsed a template type parameter at the given
/// depth and position, with an optional default argument.
Decl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
/// ActOnTemplateParameterList - Build a TemplateParameterList from parsed
/// parameters and their delimiting locations.
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<Decl *> Params,
SourceLocation RAngleLoc);
/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
/// \brief Check the well-formedness of \p NewParams, in context \p TPC,
/// against a previous declaration's parameter list \p OldParams; presumably
/// \p OldParams may be null when there is no prior declaration -- confirm.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
/// \brief Match the template parameter lists of a declaration against its
/// scope specifier, identifying which list (if any) applies and whether
/// this is an explicit specialization.
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
/// Check and build a class template declaration or redeclaration.
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
AttributeList *Attr,
TemplateParameterList *TemplateParams,
AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc,
unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
/// Translate parsed template arguments \p In into the semantic form \p Out.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Overload that builds the reference expression to the variable template
/// specialization.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template);
/// Parsed an explicit (possibly partial) class template specialization.
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc,
SourceLocation ModulePrivateLoc,
TemplateIdAnnotation &TemplateId,
AttributeList *Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
/// Explicit instantiation of a class template specialization
/// ('template class X<int>;').
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
const CXXScopeSpec &SS,
TemplateTy Template,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
AttributeList *Attr);
/// Overload for an explicit instantiation named without template
/// arguments (tag + identifier form).
DeclResult
ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec,
SourceLocation KWLoc,
CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
AttributeList *Attr);
/// Overload for an explicit instantiation declared through a declarator
/// (e.g. a function or variable template).
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
/// \brief Substitute into the default template argument of \p Param, if it
/// has one; \p HasDefaultArg reports whether a default was present.
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// \brief The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// \brief The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// \brief The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
/// Check a type argument against a template type parameter.
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
/// Check an expression argument against a non-type template parameter.
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check a template argument against a template template parameter.
bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateArgumentLoc &Arg,
unsigned ArgumentPackIndex);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// \brief Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// \brief We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// \brief We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// \brief We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
/// \brief Determine whether the parameter lists \p New and \p Old are
/// equivalent under comparison kind \p Kind, emitting diagnostics when
/// \p Complain is true.
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// \brief Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// \brief Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
/// \brief Build the type denoted by a dependent typename-specifier.
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
// Rebuilding of types/expressions within the current instantiation;
// presumably used when re-entering a class template definition -- confirm.
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// \brief The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// \brief An arbitrary expression.
UPPC_Expression = 0,
/// \brief The base type of a class type.
UPPC_BaseType,
/// \brief The type of an arbitrary declaration.
UPPC_DeclarationType,
/// \brief The type of a data member.
UPPC_DataMemberType,
/// \brief The size of a bit-field.
UPPC_BitFieldWidth,
/// \brief The expression in a static assertion.
UPPC_StaticAssertExpression,
/// \brief The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// \brief The enumerator value.
UPPC_EnumeratorValue,
/// \brief A using declaration.
UPPC_UsingDeclaration,
/// \brief A friend declaration.
UPPC_FriendDeclaration,
/// \brief A declaration qualifier.
UPPC_DeclarationQualifier,
/// \brief An initializer.
UPPC_Initializer,
/// \brief A default argument.
UPPC_DefaultArgument,
/// \brief The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// \brief The type of an exception.
UPPC_ExceptionType,
/// \brief Partial specialization.
UPPC_PartialSpecialization,
/// \brief Microsoft __if_exists.
UPPC_IfExists,
/// \brief Microsoft __if_not_exists.
UPPC_IfNotExists,
/// \brief Lambda expression.
UPPC_Lambda,
/// \brief Block expression.
UPPC_Block
};
/// \brief Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// \brief If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// \brief If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// \brief If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// \brief If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// \brief If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// \brief Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// \brief Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// \brief Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// \brief Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// \brief Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// \brief Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType);
/// \brief Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// \brief Template argument deduction was successful.
TDK_Success = 0,
/// \brief The declaration was invalid; do nothing.
TDK_Invalid,
/// \brief Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// \brief Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// \brief Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// \brief Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// \brief Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// \brief After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// \brief A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// \brief When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// \brief When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// \brief The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// \brief The arguments included an overloaded function name that could
/// not be resolved to a suitable function.
TDK_FailedOverloadResolution,
/// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType,
unsigned ArgIdx,
QualType OriginalArgType)
: OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) { }
QualType OriginalParamType; ///< The parameter type as written, before deduction.
unsigned ArgIdx;            ///< Index of the corresponding call argument.
QualType OriginalArgType;   ///< The argument type as seen at the call site.
};
TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool PartialOverloading = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool InOverloadResolution = false);
/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded; the deduced type is in DeduceAutoType's
/// \c Result out-parameter.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed and a diagnostic has already been emitted.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// \brief Mark (in \p Deduced) the template parameters that are deduced for
/// \p FunctionTemplate, delegating to the static overload below with this
/// semantic-analysis object's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
/// \brief The kind of template instantiation we are performing.
enum InstantiationKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template, and
/// TemplateArgs/NumTemplateArguments provides the template
/// arguments as specified.
/// FIXME: Use a TemplateArgumentList
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a ClassTemplatePartialSpecializationDecl or
/// a FunctionTemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation
} Kind;
/// \brief The point of instantiation within the source code.
SourceLocation PointOfInstantiation;
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// \brief The entity that is being instantiated.
Decl *Entity;
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
/// \brief The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// \brief The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// \brief The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
ActiveTemplateInstantiation()
: Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
// NOTE(review): equality compares only the fields that identify the
// instantiation for the given Kind; source locations and ranges are not
// compared -- presumably identity is determined by kind/entity/arguments
// only; confirm that is the intent.
friend bool operator==(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
if (X.Kind != Y.Kind)
return false;
if (X.Entity != Y.Entity)
return false;
switch (X.Kind) {
case TemplateInstantiation:
case ExceptionSpecInstantiation:
return true;
case PriorTemplateArgumentSubstitution:
case DefaultTemplateArgumentChecking:
return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
case DefaultTemplateArgumentInstantiation:
case ExplicitTemplateArgumentSubstitution:
case DeducedTemplateArgumentSubstitution:
case DefaultFunctionArgumentInstantiation:
return X.TemplateArgs == Y.TemplateArgs;
}
llvm_unreachable("Invalid InstantiationKind!");
}
friend bool operator!=(const ActiveTemplateInstantiation &X,
const ActiveTemplateInstantiation &Y) {
return !(X == Y);
}
};
/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
ActiveTemplateInstantiations;
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// \brief Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;
/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// \brief RAII object that temporarily changes the argument pack
/// substitution index of a \c Sema object, restoring the previous index
/// on destruction.
///
/// See \c ArgumentPackSubstitutionIndex for the meaning of the index.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int SavedIndex; // Index in effect before construction; restored on destruction.
public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), SavedIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }
  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = SavedIndex;
  }
};
friend class ArgumentPackSubstitutionRAII;
/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// \brief Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// \brief Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
ActiveTemplateInstantiation::InstantiationKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// \brief Note that we have finished instantiating this template.
void Clear();
// RAII: Clear() is invoked automatically when the scope ends.
~InstantiatingTemplate() { Clear(); }
/// \brief Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
private:
Sema &SemaRef;
bool Invalid;
bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void PrintInstantiationStack();
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// \brief Determine whether the innermost expression evaluation context is
/// an unevaluated context, in the sense of C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
///
/// On construction, records the current SFINAE error count and flags; on
/// destruction, restores them verbatim so that traps nest correctly.
class SFINAETrap {
Sema &SemaRef;
// Error count at entry; used both to restore the count on exit and to
// detect (in hasErrorOccurred) whether new errors were trapped.
unsigned PrevSFINAEErrors;
// Saved flags, restored unchanged by the destructor.
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
{
// If we are not already in a SFINAE context, this trap establishes a
// non-instantiation SFINAE context.
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
// Restore all saved state exactly as it was on entry.
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
}
/// \brief Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
// Saved value of Sema::DisableTypoCorrection, restored on destruction.
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
// Disable typo correction for the duration of the tentative analysis.
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// \brief RAII object that optionally moves aside the lists of pending
/// implicit template instantiations and pending vtable uses, restoring
/// them on destruction.
///
/// When \p Enabled is false, construction and destruction are no-ops.
/// The destructor asserts that both lists were fully drained before the
/// saved contents are swapped back in.
class SavePendingInstantiationsAndVTableUsesRAII {
public:
SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
// Move the current pending work aside; the destructor swaps it back.
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
~SavePendingInstantiationsAndVTableUsesRAII() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// \brief RAII object that moves aside the list of pending local implicit
/// instantiations, restoring it on destruction.
///
/// The destructor asserts that every instantiation queued inside the
/// scope has been performed before the saved list is swapped back in.
class SavePendingLocalImplicitInstantiationsRAII {
public:
SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
// Move the current queue aside so the scope starts with an empty one.
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
~SavePendingLocalImplicitInstantiationsRAII() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  ///
  /// Indices must be set in strictly increasing order; any skipped
  /// entries are value-initialized.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    // Grow through 'index' (default-filling any gap), then record the info.
    Infos.resize(index + 1);
    Infos[index] = info;
    // Remember whether anything non-default has ever been recorded.
    HasInteresting |= (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null if every entry is
  /// just the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting)
      return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
ParmVarDecl **Params, unsigned NumParams,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// \brief An attribute from a template pattern whose instantiation onto
/// the new declaration is deferred until later (see InstantiateAttrs).
struct LateInstantiatedAttribute {
// The attribute on the template pattern still awaiting instantiation.
const Attr *TmplAttr;
// Local instantiation scope to use when the attribute is instantiated.
LocalInstantiationScope *Scope;
// The declaration the instantiated attribute will be attached to.
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
SourceLocation PointOfInstantiation,
VarDecl *Var,
bool Recursive = false,
bool DefinitionRequired = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// \brief The kind of Objective-C container context currently being
/// processed, or OCK_None if none (see getObjCContainerKind).
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc,
IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl * const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc,
AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName,
SourceLocation CategoryLoc,
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// \brief Special Objective-C method families (alloc, new, copy, and
/// retaining vs. non-retaining init), or OSMK_None for ordinary methods.
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
/// \brief Parser-supplied information about a single argument of an
/// Objective-C method declaration (consumed by ActOnMethodDeclaration).
struct ObjCArgInfo {
// Name of the argument and where it was written.
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
AttributeList *ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType,
ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo,
DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// \brief The message is sent to 'super'.
ObjCSuperMessage,
/// \brief The message is an instance message.
ObjCInstanceMessage,
/// \brief The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// \brief Describes the compatibility of a result type with its method,
/// as consumed by CheckObjCMethodOverrides.
enum ResultTypeCompatibilityKind {
RTC_Compatible,   // the result type is compatible
RTC_Incompatible, // the result type is incompatible
RTC_Unknown       // compatibility could not be determined
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
enum PragmaPackKind {
PPK_Default, // #pragma pack([n])
PPK_Show, // #pragma pack(show), only supported by MSVC.
PPK_Push, // #pragma pack(push, [identifier], [n])
PPK_Pop // #pragma pack(pop, [identifier], [n])
};
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
IdentifierInfo *Name,
Expr *Alignment,
SourceLocation PragmaLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// \brief Identifies which MSVC section pragma a segment belongs to
/// (\#pragma bss_seg/data_seg/const_seg/code_seg).
enum PragmaSectionKind {
PSK_DataSeg,  // #pragma data_seg
PSK_BSSSeg,   // #pragma bss_seg
PSK_ConstSeg, // #pragma const_seg
PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// \brief Called on well-formed \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
/// Overload taking a type argument (e.g. 'alignas(T)') rather than an
/// alignment expression.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

/// AddParameterABIAttr - Adds a parameter-ABI attribute (per \p ABI) to a
/// particular declaration.
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
                         ParameterABI ABI, unsigned SpellingListIndex);

/// AddNSConsumedAttr - Adds an ns_consumed or cf_consumed attribute
/// (selected by \p isNSConsumed) to a particular declaration.
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
                       unsigned SpellingListIndex, bool isNSConsumed,
                       bool isTemplateInstantiation);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//

/// Called on a well-formed 'co_await' expression; performs any
/// scope-dependent checks before building the expression.
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
/// Called on a well-formed 'co_yield' expression.
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
/// Called on a well-formed 'co_return' statement.
StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E);

/// Build a 'co_await' expression from an already-checked operand.
ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E);
/// Build a 'co_yield' expression from an already-checked operand.
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
/// Build a 'co_return' statement from an already-checked operand.
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E);

/// Check the completed body of a coroutine; may rewrite \p Body.
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
  // Opaque pointer to the OpenMP data-sharing attributes stack
  // (implementation type lives in the .cpp file).
  void *VarDataSharingAttributesStack;
  /// \brief Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  /// \brief Destruction of the data-sharing attributes stack.
  void DestroyDataSharingAttributesStack();
  /// \brief Verify that \p Op is an integer constant expression valid for the
  /// clause \p CKind; positive (or non-negative, when \p StrictlyPositive is
  /// false) values are required.
  ExprResult
  VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
                                        bool StrictlyPositive = true);

public:
  /// \brief Return true if the provided declaration \a VD should be captured by
  /// reference in the provided scope \a RSI. This will take into account the
  /// semantics of the directive and associated clauses.
  bool IsOpenMPCapturedByRef(ValueDecl *D,
                             const sema::CapturedRegionScopeInfo *RSI);

  /// \brief Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *IsOpenMPCapturedDecl(ValueDecl *D);
  /// \brief Build a reference to the OpenMP capture \p Capture with the given
  /// value kind, object kind and location.
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// \brief Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level);

  /// \brief Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level);

  /// \brief Perform the implicit integer conversion OpenMP requires on
  /// \p Op (e.g. for clause arguments).
  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);
  /// \brief Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);
  /// \brief Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// \brief End analysis of clauses.
  void EndOpenMPClause();
  /// \brief Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// \brief Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// \brief Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
                                     CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id);
  /// \brief Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);
  /// \brief Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
  /// \brief Initialize declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
  /// \brief Finish current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
  /// \brief Initialize declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
  /// \brief Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer);
  /// \brief Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// \brief Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// \brief End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
  /// \brief Generic entry point for building any OpenMP executable directive
  /// of kind \p Kind from its clauses and associated statement; dispatches to
  /// the directive-specific ActOnOpenMP*Directive methods below.
  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                  Stmt *AStmt,
                                                  SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target data' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt, SourceLocation StartLoc,
                                            SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target enter data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                                 SourceLocation StartLoc,
                                                 SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target exit data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target parallel' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// \brief Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp distribute' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Generic entry point for OpenMP clauses that take a single
  /// expression argument; dispatches to the clause-specific
  /// ActOnOpenMP*Clause methods below based on \p Kind.
  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);
  /// \brief Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);
  /// \brief Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
  /// \brief Generic entry point for OpenMP clauses that take a single
  /// enumeration-like argument (passed as \p Argument); dispatches to the
  /// clause-specific methods below based on \p Kind.
  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                     unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Generic entry point for OpenMP clauses that take both argument
  /// list(s) and an expression (e.g. 'schedule'); dispatches based on
  /// \p Kind.
  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// \brief Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
  /// \brief Generic entry point for OpenMP clauses that take no argument;
  /// dispatches to the clause-specific methods below based on \p Kind.
  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// \brief Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// \brief Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// \brief Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Generic entry point for OpenMP clauses that take a variable list
  /// (private, firstprivate, map, depend, etc.); dispatches to the
  /// clause-specific methods below based on \p Kind. The extra parameters
  /// carry the clause-specific pieces (reduction id, depend/map/linear
  /// kinds) that only some clause kinds use.
  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
      OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
      SourceLocation DepLinMapLoc);
  /// \brief Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'reduction' clause.
  OMPClause *
  ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                             SourceLocation LParenLoc, SourceLocation ColonLoc,
                             SourceLocation EndLoc,
                             CXXScopeSpec &ReductionIdScopeSpec,
                             const DeclarationNameInfo &ReductionId);
  /// \brief Called on well-formed 'linear' clause.
  OMPClause *
  ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                          SourceLocation ColonLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                          SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'map' clause.
  OMPClause *
  ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
                       OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                       SourceLocation MapLoc, SourceLocation ColonLoc,
                       ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                       SourceLocation LParenLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
      SourceLocation CommaLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);
  /// \brief The kind of conversion being performed.
  enum CheckedConversionKind {
    /// \brief An implicit conversion.
    CCK_ImplicitConversion,
    /// \brief A C-style cast.
    CCK_CStyleCast,
    /// \brief A functional-style cast.
    CCK_FunctionalCast,
    /// \brief A cast other than a C-style cast.
    CCK_OtherCast
  };

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                   = CCK_ImplicitConversion);
  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of an unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);
  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  /// Determine the variadic-call context (function, block, method, ...)
  /// for the given callee and prototype; used to select diagnostics.
  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);
  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointers types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointers types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's by
    /// far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointers types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For example,
    /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions, if ConvertRHS
// is true.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
bool ConvertRHS = true);
// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool *NonStandardCompositeType = nullptr);
/// Convenience wrapper over the Expr*& overload above: unwraps both
/// ExprResult operands, computes the composite pointer type, and writes
/// any rewritten expressions back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = nullptr) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Result =
      FindCompositePointerType(Loc, LHS, RHS, NonStandardCompositeType);
  // The callee may have rewritten the operands; propagate that back.
  E1 = LHS;
  E2 = RHS;
  return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible_With_Added_Qualification - The two types are
/// reference-compatible with added qualification, meaning that
/// they are reference-compatible and the qualifiers on T1 (cv1)
/// are greater than the qualifiers on T2 (cv2).
Ref_Compatible_With_Added_Qualification,
/// Ref_Compatible - The two types are reference-compatible and
/// have equivalent qualifiers (cv1 == cv2).
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// \brief Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// When true, all diagnostics from this verifier are suppressed.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Diagnose an expression that is not an integer constant expression.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
/// Diagnose an expression that is not a formal ICE but could be folded to
/// a constant; non-pure, so subclasses may inherit a default behavior.
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
/// The CUDA compilation target of a function, as computed by
/// IdentifyCUDATarget from its host/device attributes.
enum CUDAFunctionTarget {
CFT_Device,       // Device-side function.
CFT_Global,       // Kernel (global) function.
CFT_Host,         // Host-side function.
CFT_HostDevice,   // Compiled for both host and device.
CFT_InvalidTarget // Attribute combination forms no valid target.
};
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode. Only in effect if
// LangOpts.CUDADisableTargetCallChecks is true.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
SmallVectorImpl<FunctionDecl *> &Matches);
void EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
SmallVectorImpl<DeclAccessPair> &Matches);
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// \brief Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// \brief Code completion occurs within a class, struct, or union.
PCC_Class,
/// \brief Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// \brief Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// \brief Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// \brief Code completion occurs following one or more template
/// headers.
PCC_Template,
/// \brief Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// \brief Code completion occurs within an expression.
PCC_Expression,
/// \brief Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// \brief Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// \brief Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// \brief Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// \brief Code completion occurs where only a type is permitted.
PCC_Type,
/// \brief Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// \brief Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
bool IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
// Index of the format-string argument, taken from the FormatAttr.
unsigned FormatIdx;
// Index of the first argument consumed by the format string.
unsigned FirstDataArg;
// True if the callee takes a va_list rather than variadic arguments.
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
ArrayRef<const Expr *> Args, bool IsMemberFunction,
SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartImpl(CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinMSVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
/// The flavor of format string being checked, as derived from a
/// FormatAttr (see GetFormatStringType).
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl,
IdentifierInfo *FnInfo);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
// The type associated with this magic value (see
// RegisterTypeTagForDatatype).
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// If true, the tagged argument is required to be null — presumably a null
// pointer constant; confirm against the type-tag checking code.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// \brief Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const Expr * const *ExprArgs);
/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
/// Bump the Microsoft mangling number tracked by the parser's
/// current scope.
void incrementMSManglingNumber() const {
  CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// Retrieve the lexical declaration context: the recorded
/// OriginalLexicalContext when one is set, otherwise the semantic
/// CurContext.
DeclContext *getCurLexicalContext() const {
  if (OriginalLexicalContext)
    return OriginalLexicalContext;
  return CurContext;
}
AvailabilityResult getCurContextAvailability() const;
/// Retrieve the current lexical context for Objective-C purposes,
/// mapping a category to the interface it extends, since a category
/// implicitly has the attributes of its interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *Ctx = getCurLexicalContext();
  if (const ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
    Ctx = Category->getClassInterface();
  return Ctx;
}
/// \brief To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // During code completion we may be just past a comma, in which case the
  // partial call is treated as carrying one extra (not-yet-written) argument.
  const size_t EffectiveArgs =
      (PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
// NOTE(review): presumably this worklist is drained by the parser once the
// class is complete -- confirm against the consumer.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
};
/// \brief RAII object that enters a new expression evaluation context.
///
/// The constructors push NewContext onto Sema's evaluation-context stack;
/// the destructor pops it, so the context is scoped exactly to this
/// object's lifetime.
class EnterExpressionEvaluationContext {
Sema &Actions;
public:
/// Enter NewContext, optionally recording the declaration that provides
/// the lambda context (or nullptr) and whether this is a decltype operand.
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
IsDecltype);
}
/// Tag-dispatched variant: passes Sema::ReuseLambdaContextDecl so the
/// pushed context reuses the enclosing lambda context declaration
/// (as the tag's name indicates).
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
bool IsDecltype = false)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(NewContext,
Sema::ReuseLambdaContextDecl,
IsDecltype);
}
/// Leave the context entered by the constructor.
~EnterExpressionEvaluationContext() {
Actions.PopExpressionEvaluationContext();
}
};
/// Build a DeductionFailureInfo record from a template deduction result
/// (TDK) and its associated deduction information.
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// \brief Tokens saved so the function body can be parsed later.
CachedTokens Toks;
/// \brief The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
#endif
|
core_csymm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zsymm.c, normal z -> c, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_symm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha \times A \times B + \beta \times C \f]
* or
* \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* where alpha and beta are scalars, A is a symmetric matrix and B and
* C are m-by-n matrices.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft,
* and is n otherwise. Only the uplo triangular part is referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in] B
* B is an ldb-by-n matrix, where the leading m-by-n part of
* the array B must contain the matrix B.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* C is an ldc-by-n matrix.
* On exit, the array is overwritten by the m-by-n updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
******************************************************************************/
// Thin dispatch layer: the entire symmetric matrix-matrix multiply is
// delegated to the CBLAS implementation in column-major layout. Declared
// weak so a tuned implementation can override it at link time.
__attribute__((weak))
void plasma_core_csymm(plasma_enum_t side, plasma_enum_t uplo,
                       int m, int n,
                       plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
                       const plasma_complex32_t *B, int ldb,
                       plasma_complex32_t beta, plasma_complex32_t *C, int ldc)
{
    // PLASMA's side/uplo enums are cast directly to their CBLAS counterparts.
    cblas_csymm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
                m, n,
                CBLAS_SADDR(alpha), A, lda,
                B, ldb,
                CBLAS_SADDR(beta), C, ldc);
}
/******************************************************************************/
void plasma_core_omp_csymm(
    plasma_enum_t side, plasma_enum_t uplo,
    int m, int n,
    plasma_complex32_t alpha, const plasma_complex32_t *A, int lda,
    const plasma_complex32_t *B, int ldb,
    plasma_complex32_t beta, plasma_complex32_t *C, int ldc,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // The symmetric matrix A is m-by-m when applied on the left and
    // n-by-n when applied on the right; ak is its order.
    const int ak = (side == PlasmaLeft) ? m : n;

    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(in:B[0:ldb*n]) \
                     depend(inout:C[0:ldc*n])
    {
        // Do nothing if an earlier task in the sequence already failed.
        if (sequence->status == PlasmaSuccess)
            plasma_core_csymm(side, uplo,
                              m, n,
                              alpha, A, lda,
                              B, ldb,
                              beta, C, ldc);
    }
}
|
apm.c | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560 X2016
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <omp.h>
#define APM_DEBUG 0
/* Read the whole of <filename> into a freshly malloc'ed buffer (NOT
 * NUL-terminated). On success the byte count is stored in *size and the
 * buffer is returned; the caller owns it and must free() it. On any
 * error NULL is returned, *size is left untouched, and no resources leak. */
char *
read_input_file( char * filename, int * size )
{
    char * buf ;
    off_t fsize;
    int fd ;
    ssize_t n_bytes ;

    /* Open the text file */
    fd = open( filename, O_RDONLY ) ;
    if ( fd == -1 )
    {
        fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
        return NULL ;
    }

    /* Get the number of characters in the textfile (seek to end, rewind) */
    fsize = lseek(fd, 0, SEEK_END);
    if ( fsize == (off_t)-1 || lseek(fd, 0, SEEK_SET) == (off_t)-1 )
    {
        fprintf( stderr, "Unable to seek in the text file <%s>\n", filename ) ;
        close( fd ) ;
        return NULL ;
    }

#if APM_DEBUG
    /* off_t width is platform-dependent: cast for the %lld conversion */
    printf( "File length: %lld\n", (long long)fsize ) ;
#endif

    /* Allocate data to copy the target text. Request at least one byte so
     * that an empty file's malloc(0) (which may return NULL) is not
     * mistaken for an allocation failure. */
    buf = (char *)malloc( (fsize > 0 ? fsize : 1) * sizeof ( char ) ) ;
    if ( buf == NULL )
    {
        fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
                 (long long)fsize ) ;
        close( fd ) ;
        return NULL ;
    }

    n_bytes = read( fd, buf, fsize ) ;
    close( fd ) ;
    if ( n_bytes != fsize )
    {
        fprintf( stderr,
                 "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
                 (long long)fsize, (int)n_bytes ) ;
        free( buf ) ;
        return NULL ;
    }

#if APM_DEBUG
    printf( "Number of read bytes: %d\n", (int)n_bytes ) ;
#endif

    *size = (int)n_bytes ;

    return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))

/* Levenshtein (edit) distance between the length-len prefixes of s1 and s2,
 * computed with a single rolling column of the DP matrix.
 * column is caller-provided scratch of at least len+1 ints.
 * Returns 0 for len <= 0 (the original read column[len] uninitialized in
 * that case, which is undefined behavior). */
int levenshtein(char *s1, char *s2, int len, int * column) {
    int x, y, lastdiag, olddiag;

    /* Two empty prefixes are at distance 0; also guards the scratch array. */
    if (len <= 0)
        return 0;

    /* Base row: transforming the empty prefix costs y insertions. */
    for (y = 1; y <= len; y++)
    {
        column[y] = y;
    }
    for (x = 1; x <= len; x++) {
        column[0] = x;
        lastdiag = x-1 ;
        for (y = 1; y <= len; y++) {
            olddiag = column[y];
            column[y] = MIN3(
                column[y] + 1,                               /* deletion */
                column[y-1] + 1,                             /* insertion */
                lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)      /* substitution */
            );
            lastdiag = olddiag;
        }
    }
    return(column[len]);
}
/* Approximate pattern matching driver: for each pattern given on the command
 * line, count the positions in the input text whose Levenshtein distance to
 * the pattern is at most approx_factor. The per-position search is
 * parallelized with OpenMP. Returns 0 on success, 1 on any error. */
int
main( int argc, char ** argv )
{
    char ** pattern ;
    char * filename ;
    int approx_factor = 0 ;
    int nb_patterns = 0 ;
    int i, j ;
    char * buf ;
    struct timeval t1, t2;
    double duration ;
    int n_bytes ;
    int * n_matches ;
    int tmp_matches ;

    /* Check number of arguments */
    if ( argc < 4 )
    {
        printf( "Usage: %s approximation_factor "
                "dna_database pattern1 pattern2 ...\n",
                argv[0] ) ;
        return 1 ;
    }

    /* Get the distance factor */
    approx_factor = atoi( argv[1] ) ;

    /* Grab the filename containing the target text */
    filename = argv[2] ;

    /* Get the number of patterns that the user wants to search for */
    nb_patterns = argc - 3 ;

    /* Fill the pattern array */
    pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
    if ( pattern == NULL )
    {
        fprintf( stderr,
                 "Unable to allocate array of pattern of size %d\n",
                 nb_patterns ) ;
        return 1 ;
    }

    /* Grab the patterns */
    for ( i = 0 ; i < nb_patterns ; i++ )
    {
        int l ;

        l = strlen(argv[i+3]) ;
        if ( l <= 0 )
        {
            fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
            return 1 ;
        }

        pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
        if ( pattern[i] == NULL )
        {
            fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
            return 1 ;
        }

        /* Source length is exactly l, so l+1 bytes copies the NUL too. */
        strncpy( pattern[i], argv[i+3], (l+1) ) ;
    }

    printf( "Approximate Pattern Matching: "
            "looking for %d pattern(s) in file %s w/ distance of %d\n",
            nb_patterns, filename, approx_factor ) ;

    buf = read_input_file( filename, &n_bytes ) ;
    if ( buf == NULL )
    {
        return 1 ;
    }

    /* Allocate the array of matches */
    n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
    if ( n_matches == NULL )
    {
        /* %zu: the product with sizeof is a size_t */
        fprintf( stderr, "Error: unable to allocate memory for %zuB\n",
                 nb_patterns * sizeof( int ) ) ;
        return 1 ;
    }

    /*****
     * BEGIN MAIN LOOP
     ******/

    /* Timer start */
    gettimeofday(&t1, NULL);

    for ( i = 0 ; i < nb_patterns ; i++ )
    {
        int size_pattern = strlen(pattern[i]) ;

        n_matches[i] = 0 ;
        tmp_matches = 0 ;

#pragma omp parallel
        {
            /* One DP scratch column per thread, allocated once per thread.
             * (Previously a single pointer shared by all threads was
             * re-malloc'ed for every byte of the text inside the loop: a
             * data race on the pointer plus a leak of every allocation but
             * the last.) */
            int * column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ;
            if ( column == NULL )
            {
                fprintf( stderr, "Error: unable to allocate memory for column (%zuB)\n",
                         (size_pattern+1) * sizeof( int ) ) ;
                exit(1);
            }

            /* j is implicitly private: it is the omp-for loop index. */
#pragma omp for schedule(guided) reduction(+:tmp_matches)
            for ( j = 0 ; j < n_bytes ; j++ )
            {
                int distance = 0 ;
                int size ;

#if APM_DEBUG
                if ( j % 100 == 0 )
                {
                    printf( "Procesing byte %d (out of %d)\n", j, n_bytes ) ;
                }
#endif

                /* Near the end of the text only the remaining bytes count */
                size = size_pattern ;
                if ( n_bytes - j < size_pattern )
                {
                    size = n_bytes - j ;
                }

                distance = levenshtein( pattern[i], &buf[j], size, column ) ;

                if ( distance <= approx_factor ) {
                    tmp_matches = tmp_matches + 1 ;
                }
            }

            free( column );
        }
        n_matches[i] = tmp_matches;
    }

    /* Timer stop */
    gettimeofday(&t2, NULL);

    duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);

    printf( "APM done in %lf s\n", duration ) ;

    /*****
     * END MAIN LOOP
     ******/

    for ( i = 0 ; i < nb_patterns ; i++ )
    {
        printf( "Number of matches for pattern <%s>: %d\n",
                pattern[i], n_matches[i] ) ;
        free( pattern[i] ) ;
    }

    free( pattern ) ;
    free( n_matches ) ;
    free( buf ) ;

    return 0 ;
}
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
unsigned ResultKind : 2;
/// The kind of Result as defined by APValue::Kind.
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, true if the tail-allocated integer is
/// unsigned.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
/// integer. 7 bits because it is the minimal number of bits to represent a
/// value from 0 to 64 (the size of the tail-allocated integer).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
/// tail-allocated APValue.
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
//
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArrayOrMatrixSubscriptExprBitfields {
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 3 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 7;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// True if the call expression has some floating-point features.
unsigned HasFPFeatures : 1;
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
unsigned Kind : 2;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
/// expression. Used to determine if a statement expression remains
/// dependent after instantiation.
unsigned TemplateDepth;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait. According to [implimits]
/// 8 bits would be enough, but we require (and test for) at least 16 bits
/// to mirror FunctionType.
unsigned NumArgs;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
/// Per-node bits for UnresolvedMemberExpr; its fields start after the bits
/// owned by OverloadExpr (see NumOverloadExprBits).
class UnresolvedMemberExprBitfields {
  friend class ASTStmtReader;
  friend class UnresolvedMemberExpr;

  // Reserve the bits used by OverloadExpr (and Expr beneath it).
  unsigned : NumOverloadExprBits;

  /// Whether this member expression used the '->' operator or
  /// the '.' operator.
  unsigned IsArrow : 1;

  /// Whether the lookup results contain an unresolved using declaration.
  unsigned HasUnresolvedUsing : 1;
};

// Same size constraint as for UnresolvedLookupExprBitfields above.
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
              "UnresolvedMemberExprBitfields must be <= than 4 bytes to"
              "avoid trashing OverloadExprBitfields::NumResults!");
/// Per-node bits for CXXNoexceptExpr.
class CXXNoexceptExprBitfields {
  friend class ASTStmtReader;
  friend class CXXNoexceptExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// The computed result of the noexcept() operator.
  unsigned Value : 1;
};
/// Per-node bits for SubstNonTypeTemplateParmExpr.
class SubstNonTypeTemplateParmExprBitfields {
  friend class ASTStmtReader;
  friend class SubstNonTypeTemplateParmExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// The location of the non-type template parameter reference.
  SourceLocation NameLoc;
};
/// Per-node bits for LambdaExpr.
class LambdaExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class LambdaExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// The default capture kind, which is a value of type
  /// LambdaCaptureDefault.
  unsigned CaptureDefault : 2;

  /// Whether this lambda had an explicit parameter list vs. an
  /// implicit (and empty) parameter list.
  unsigned ExplicitParams : 1;

  /// Whether this lambda had the result type explicitly specified.
  unsigned ExplicitResultType : 1;

  /// The number of captures.
  unsigned NumCaptures : 16;
};
/// Per-node bits for RequiresExpr (C++20 requires-expressions).
class RequiresExprBitfields {
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  friend class RequiresExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// Whether the requires-expression's constraints are satisfied.
  unsigned IsSatisfied : 1;

  /// The location of the 'requires' keyword.
  SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//

/// Per-node bits for CoawaitExpr.
class CoawaitExprBitfields {
  friend class CoawaitExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// Whether this co_await was implicitly generated (not written in source).
  unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//

/// Per-node bits for ObjCIndirectCopyRestoreExpr.
class ObjCIndirectCopyRestoreExprBitfields {
  friend class ObjCIndirectCopyRestoreExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// Whether the argument should be copied on the way in.
  unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//

/// Per-node bits for OpaqueValueExpr.
class OpaqueValueExprBitfields {
  friend class ASTStmtReader;
  friend class OpaqueValueExpr;

  // Reserve the bits used by the base Expr class.
  unsigned : NumExprBits;

  /// The OVE is a unique semantic reference to its source expression if this
  /// bit is set to true.
  unsigned IsUnique : 1;

  /// The location of the expression the OVE stands in for.
  SourceLocation Loc;
};
  /// The single word of per-node storage shared by every Stmt subclass.
  /// Every member class begins with an anonymous bit-field reserving the
  /// bits of its base (StmtBits / ExprBits / OverloadExprBits), so the
  /// common leading bits are readable through StmtBits regardless of which
  /// member is active. Keep this list ordered as in StmtNodes.td.
  union {
    // Same order as in StmtNodes.td.
    // Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    ConstantExprBitfields ConstantExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArrayOrMatrixSubscriptExprBitfields ArrayOrMatrixSubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    GenericSelectionExprBitfields GenericSelectionExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    SourceLocExprBitfields SourceLocExprBits;

    // GNU Extensions.
    StmtExprBitfields StmtExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
    LambdaExprBitfields LambdaExprBits;
    RequiresExprBitfields RequiresExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };
public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  // Convenience overload taking the context by pointer; forwards to the
  // by-reference form.
  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  // Plain placement new.
  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  // Matching placement deletes. These are only invoked by the compiler if a
  // constructor throws; deallocation is otherwise owned by the ASTContext,
  // so they are all no-ops.
  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}
public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

  /// The likelihood of a branch being taken.
  enum Likelihood {
    LH_Unlikely = -1, ///< Branch has the [[unlikely]] attribute.
    LH_None,          ///< No attribute set or branches of the IfStmt have
                      ///< the same attribute.
    LH_Likely         ///< Branch has the [[likely]] attribute.
  };

protected:
  /// Iterator for iterating over Stmt * arrays that contain only T *.
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
  struct CastIterator
      : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
                                    std::random_access_iterator_tag, TPtr> {
    using Base = typename CastIterator::iterator_adaptor_base;

    CastIterator() : Base(nullptr) {}
    CastIterator(StmtPtr *I) : Base(I) {}

    // Dereference casts the stored Stmt* down to T*; null stays null.
    typename Base::value_type operator*() const {
      return cast_or_null<T>(*this->I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only T *.
  template <typename T>
  using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;

  using ExprIterator = CastIterator<Expr>;
  using ConstExprIterator = ConstCastIterator<Expr>;
private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  // Stmt nodes are allocated through the ASTContext (see operator new above)
  // and are never default-constructed, copied, or moved.
  Stmt() = delete;
  Stmt(const Stmt &) = delete;
  Stmt(Stmt &&) = delete;
  Stmt &operator=(const Stmt &) = delete;
  Stmt &operator=(Stmt &&) = delete;

  Stmt(StmtClass SC) {
    // Guard the bitfield union's size and alignment invariants: the per-node
    // storage must stay within a single 8-byte word and be pointer-aligned.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  /// The concrete kind of this node, as recorded in the shared bitfields.
  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \returns the likelihood of a set of attributes.
  static Likelihood getLikelihood(ArrayRef<const Attr *> Attrs);

  /// \returns the likelihood of a statement.
  static Likelihood getLikelihood(const Stmt *S);

  /// \returns the likelihood attribute of a statement.
  static const Attr *getLikelihoodAttr(const Stmt *S);

  /// \returns the likelihood of the 'then' branch of an 'if' statement. The
  /// 'else' branch is required to determine whether both branches specify the
  /// same likelihood, which affects the result.
  static Likelihood getLikelihood(const Stmt *Then, const Stmt *Else);

  /// \returns whether the likelihood of the branches of an if statement are
  /// conflicting. When the first element is \c true there's a conflict and
  /// the Attr's are the conflicting attributes of the Then and Else Stmt.
  static std::tuple<bool, const Attr *, const Attr *>
  determineLikelihoodConflict(const Stmt *Then, const Stmt *Else);

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(raw_ostream &OS, const ASTContext &Context) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// Pretty-prints in JSON format.
  void printJson(raw_ostream &Out, PrinterHelper *Helper,
                 const PrintingPolicy &Policy, bool AddQuotes) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    // Delegate to the const overload and cast the constness back off.
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    // Reuse the non-const implementation; the range is read-only anyway.
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  /// The (possibly singular) group of declarations this statement carries.
  DeclGroupRef DG;
  /// Source extent of the statement.
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  const_child_range children() const {
    auto Children = const_cast<DeclStmt *>(this)->children();
    return const_child_range(Children);
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }

  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }

  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }

  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  // The semicolon location is stored in the shared bitfield storage.
  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  /// True if this ";" was preceded by an empty macro expansion,
  /// e.g. "FOO;" where FOO expands to nothing.
  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  // The statement is exactly the semicolon token.
  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
/// The statements are stored as trailing objects, so the node is allocated
/// with exactly enough space for its body (see Create/CreateEmpty).
class CompoundStmt final : public Stmt,
                           private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  // Get the Stmt that StmtExpr would consider to be the result of this
  // compound statement. This is used by StmtExpr to properly emulate the GCC
  // compound expression extension, which ignores trailing NullStmts when
  // getting the result of the expression.
  // i.e. ({ 5;;; })
  //        ^^ ignored
  // If we don't find something that isn't a NullStmt, just return the last
  // Stmt.
  Stmt *getStmtExprResult() {
    for (auto *B : llvm::reverse(body())) {
      if (!isa<NullStmt>(B))
        return B;
    }
    return body_back();
  }

  const Stmt *getStmtExprResult() const {
    return const_cast<CompoundStmt *>(this)->getStmtExprResult();
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out of line after CaseStmt/DefaultStmt, which it dispatches to.
  inline Stmt *getSubStmt();

  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allow ranges in cases statement of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range. Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets of the LHS/RHS/substatement within the Stmt* trailing array;
  // these shift by one when the optional GNU-range RHS is present.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DefaultStmt - Represents the 'default:' label of a switch statement.
class DefaultStmt : public SwitchCase {
  /// The statement following the label.
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }
};
// Out-of-line definition (declared inline in SwitchCase): forward to the
// concrete subclass, since the end location lives there.
SourceLocation SwitchCase::getEndLoc() const {
  if (isa<CaseStmt>(this))
    return cast<CaseStmt>(this)->getEndLoc();
  if (isa<DefaultStmt>(this))
    return cast<DefaultStmt>(this)->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Out-of-line definition (declared inline in SwitchCase): forward to the
// concrete subclass, which owns the substatement storage.
Stmt *SwitchCase::getSubStmt() {
  if (isa<CaseStmt>(this))
    return cast<CaseStmt>(this)->getSubStmt();
  if (isa<DefaultStmt>(this))
    return cast<DefaultStmt>(this)->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
class ValueStmt : public Stmt {
protected:
  using Stmt::Stmt;

public:
  const Expr *getExprStmt() const;
  Expr *getExprStmt() {
    // Delegate to the const overload and cast the constness back off.
    const ValueStmt *ConstThis = this;
    return const_cast<Expr*>(ConstThis->getExprStmt());
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() >= firstValueStmtConstant &&
           T->getStmtClass() <= lastValueStmtConstant;
  }
};
/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public ValueStmt {
  /// The label's declaration.
  LabelDecl *TheDecl;
  /// The statement following the label.
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}

  // The identifier location is stored in the shared bitfield storage.
  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
/// The attributes are stored as trailing objects after the node.
class AttributedStmt final
    : public ValueStmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The statement the attributes apply to.
  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : ValueStmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  const_child_range children() const {
    return const_child_range(&SubStmt, &SubStmt + 1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;
  // IfStmt is followed by several trailing objects, some of which are
  // optional. Note that it would be more convenient to put the optional
  // trailing objects at the end, but this would change the order of the
  // children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //    Always present.
  //
  // * A "Stmt *" for the else statement.
  //    Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //    Present if and only if hasElseStorage().
  // Offsets relative to the start of the trailing "Stmt *" array (init) and
  // to the condition slot, respectively.
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  // The condition and the then-statement are always allocated.
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;
  // Total "Stmt *" slots: cond + then, plus one each for the optional else,
  // condition variable and init statement.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }
  // A single trailing SourceLocation (the 'else' keyword location) exists
  // only when an else branch is stored.
  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }
  // Indices into the trailing "Stmt *" array. Absent optional slots shift
  // every later offset down by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
         SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);
  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        SourceLocation LPL, SourceLocation RPL, Stmt *Then,
                        SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);
  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);
  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }
  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }
  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }
  // The condition is stored as a "Stmt *" to keep children() uniform, but it
  // is always an Expr — hence the reinterpret_casts below.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }
  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }
  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }
  /// Returns the else branch, or nullptr if none is stored.
  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }
  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }
  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }
  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }
  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  /// Returns the C++17 init-statement ("if (init; cond)"), or nullptr.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }
  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
  SourceLocation getElseLoc() const {
    return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
                            : SourceLocation();
  }
  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }
  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
  /// If this is an 'if constexpr', determine which substatement will be taken.
  /// Otherwise, or if the condition is value-dependent, returns None.
  Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
  bool isObjCAvailabilityCheck() const;
  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;
  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase = nullptr;
  // SwitchStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //    Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  // The condition and the body are always allocated.
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc;
  SourceLocation RParenLoc;
  // Total "Stmt *" slots: cond + body, plus one each for the optional init
  // statement and condition variable.
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }
  // Indices into the trailing "Stmt *" array. Absent optional slots shift
  // every later offset down by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond,
             SourceLocation LParenLoc, SourceLocation RParenLoc);
  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond, SourceLocation LParenLoc,
                            SourceLocation RParenLoc);
  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);
  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
  // The condition is stored as a "Stmt *" to keep children() uniform, but it
  // is always an Expr — hence the reinterpret_casts below.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }
  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }
  /// Returns the C++17 init-statement ("switch (init; cond)"), or nullptr.
  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }
  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This switch statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }
  /// Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  /// case 0: break;
  /// // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<SwitchStmt *>(this)->getConditionVariable();
  }
  /// Set the condition variable in this switch statement.
  /// The switch statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  SwitchCase *getSwitchCaseList() { return FirstCase; }
  const SwitchCase *getSwitchCaseList() const { return FirstCase; }
  void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
  SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
  void setBody(Stmt *S, SourceLocation SL) {
    setBody(S);
    setSwitchLoc(SL);
  }
  // Prepends SC to the intrusive case list; cases therefore end up in
  // reverse order of addition.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase() &&
           "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase);
    FirstCase = SC;
  }
  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }
  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }
  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;
  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //    Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //    Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  // The condition and the body are always allocated.
  enum { NumMandatoryStmtPtr = 2 };
  SourceLocation LParenLoc, RParenLoc;
  // Indices into the trailing "Stmt *" array. An absent condition variable
  // shifts the later offsets down by one.
  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }
  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL, SourceLocation LParenLoc,
            SourceLocation RParenLoc);
  /// Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
  /// Create a while statement.
  static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                           Stmt *Body, SourceLocation WL,
                           SourceLocation LParenLoc, SourceLocation RParenLoc);
  /// Create an empty while statement optionally with storage for
  /// a condition variable.
  static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
  /// True if this WhileStmt has storage for a condition variable.
  bool hasVarStorage() const { return WhileStmtBits.HasVar; }
  // The condition is stored as a "Stmt *" to keep children() uniform, but it
  // is always an Expr — hence the reinterpret_casts below.
  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }
  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }
  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }
  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }
  /// Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<WhileStmt *>(this)->getConditionVariable();
  }
  /// Set the condition variable of this while statement.
  /// The while statement must have storage for it.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }
  SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }
  SourceLocation getBeginLoc() const { return getWhileLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getBody()->getEndLoc();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }
  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
  const_child_range children() const {
    return const_child_range(getTrailingObjects<Stmt *>(),
                             getTrailingObjects<Stmt *>() +
                                 numTrailingObjects(OverloadToken<Stmt *>()));
  }
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}
/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
  // Sub-statement slots, in child-visitation order.
  // SubExprs[INIT] is an expression or declstmt.
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt *SubExprs[END_EXPR];
  SourceLocation LParenLoc, RParenLoc;
public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);
  /// Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
  /// Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);
  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt *>(SubExprs[CONDVAR]);
  }
  // Accessors for the init/cond/inc/body slots. Cond and Inc are stored as
  // "Stmt *" but are always expressions.
  Stmt *getInit() { return SubExprs[INIT]; }
  const Stmt *getInit() const { return SubExprs[INIT]; }
  Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(SubExprs[COND]);
  }
  Expr *getInc() { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  const Expr *getInc() const { return reinterpret_cast<Expr *>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt *>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
  void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }
  // Spans from the 'for' keyword to the end of the body.
  SourceLocation getBeginLoc() const { return getForLoc(); }
  SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }
  // Iterators
  child_range children() {
    return child_range(SubExprs, SubExprs + END_EXPR);
  }
  const_child_range children() const {
    return const_child_range(SubExprs, SubExprs + END_EXPR);
  }
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
  LabelDecl *Label;
  SourceLocation LabelLoc;
public:
  /// Build a goto to the given label.
  GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
      : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
    GotoStmtBits.GotoLoc = GL;
  }
  /// Build an empty goto statement.
  explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
  LabelDecl *getLabel() const { return Label; }
  void setLabel(LabelDecl *D) { Label = D; }
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getLabelLoc() const { return LabelLoc; }
  void setLabelLoc(SourceLocation L) { LabelLoc = L; }
  // Spans from the 'goto' keyword to the label name.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const { return getLabelLoc(); }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GotoStmtClass;
  }
  // Iterators: a direct goto has no children.
  child_range children() {
    return {child_iterator(), child_iterator()};
  }
  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
  SourceLocation StarLoc;
  Stmt *Target;
public:
  /// Build an indirect goto whose target is the given address expression.
  IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
      : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
    setGotoLoc(gotoLoc);
    setTarget(target);
  }
  /// Build an empty indirect goto statement.
  explicit IndirectGotoStmt(EmptyShell Empty)
      : Stmt(IndirectGotoStmtClass, Empty) {}
  SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
  void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
  SourceLocation getStarLoc() const { return StarLoc; }
  void setStarLoc(SourceLocation L) { StarLoc = L; }
  // The target is stored as a "Stmt *" but is always an Expr.
  Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
  const Expr *getTarget() const {
    return reinterpret_cast<const Expr *>(Target);
  }
  void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
  /// getConstantTarget - Returns the fixed target of this indirect
  /// goto, if one exists.
  LabelDecl *getConstantTarget();
  const LabelDecl *getConstantTarget() const {
    return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
  }
  // Spans from the 'goto' keyword to the end of the target expression.
  SourceLocation getBeginLoc() const { return getGotoLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IndirectGotoStmtClass;
  }
  // Iterators: the single child is the target expression.
  child_range children() { return child_range(&Target, &Target + 1); }
  const_child_range children() const {
    return const_child_range(&Target, &Target + 1);
  }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
  /// Build a continue statement located at CL.
  ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
    ContinueStmtBits.ContinueLoc = CL;
  }
  /// Build an empty continue statement.
  explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
  SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
  void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
  // The statement is just the 'continue' keyword itself.
  SourceLocation getBeginLoc() const { return getContinueLoc(); }
  SourceLocation getEndLoc() const { return getContinueLoc(); }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ContinueStmtClass;
  }
  // Iterators: a continue statement has no children.
  child_range children() {
    return {child_iterator(), child_iterator()};
  }
  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
  /// Build a break statement located at BL.
  BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
    BreakStmtBits.BreakLoc = BL;
  }
  /// Build an empty break statement.
  explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
  SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
  void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
  // The statement is just the 'break' keyword itself.
  SourceLocation getBeginLoc() const { return getBreakLoc(); }
  SourceLocation getEndLoc() const { return getBreakLoc(); }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == BreakStmtClass;
  }
  // Iterators: a break statement has no children.
  child_range children() {
    return {child_iterator(), child_iterator()};
  }
  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
  friend TrailingObjects;
  /// The return expression.
  Stmt *RetExpr;
  // ReturnStmt is followed optionally by a trailing "const VarDecl *"
  // for the NRVO candidate. Present if and only if hasNRVOCandidate().
  /// True if this ReturnStmt has storage for an NRVO candidate.
  bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
  unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
    return hasNRVOCandidate();
  }
  /// Build a return statement.
  ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
  /// Build an empty return statement.
  explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
  /// Create a return statement.
  static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                            const VarDecl *NRVOCandidate);
  /// Create an empty return statement, optionally with
  /// storage for an NRVO candidate.
  static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
  /// Returns the returned expression, or nullptr for a bare "return;".
  Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
  const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
  void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
  /// Retrieve the variable that might be used for the named return
  /// value optimization.
  ///
  /// The optimization itself can only be performed if the variable is
  /// also marked as an NRVO object.
  const VarDecl *getNRVOCandidate() const {
    return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                              : nullptr;
  }
  /// Set the variable that might be used for the named return value
  /// optimization. The return statement must have storage for it,
  /// which is the case if and only if hasNRVOCandidate() is true.
  void setNRVOCandidate(const VarDecl *Var) {
    assert(hasNRVOCandidate() &&
           "This return statement has no storage for an NRVO candidate!");
    *getTrailingObjects<const VarDecl *>() = Var;
  }
  SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
  void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
  SourceLocation getBeginLoc() const { return getReturnLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ReturnStmtClass;
  }
  // Iterators: the only child, if any, is the returned expression.
  child_range children() {
    if (RetExpr)
      return child_range(&RetExpr, &RetExpr + 1);
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    if (RetExpr)
      return const_child_range(&RetExpr, &RetExpr + 1);
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
  friend class ASTStmtReader;
  SourceLocation AsmLoc;
  /// True if the assembly statement does not have any input or output
  /// operands.
  bool IsSimple;
  /// If true, treat this inline assembly as having side effects.
  /// This assembly statement should not be optimized, deleted or moved.
  bool IsVolatile;
  unsigned NumOutputs;
  unsigned NumInputs;
  unsigned NumClobbers;
  // Operand expressions, laid out as outputs first, then inputs; owned and
  // populated by the derived classes.
  Stmt **Exprs = nullptr;
  AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
          unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
      : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
        NumOutputs(numoutputs), NumInputs(numinputs),
        NumClobbers(numclobbers) {}
public:
  /// Build an empty inline-assembly statement.
  explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
  SourceLocation getAsmLoc() const { return AsmLoc; }
  void setAsmLoc(SourceLocation L) { AsmLoc = L; }
  bool isSimple() const { return IsSimple; }
  void setSimple(bool V) { IsSimple = V; }
  bool isVolatile() const { return IsVolatile; }
  void setVolatile(bool V) { IsVolatile = V; }
  // NOTE(review): these return invalid locations; Stmt's location dispatch
  // presumably resolves to the derived class's overloads — confirm callers
  // never reach these on a concrete GCCAsmStmt/MSAsmStmt.
  SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
  SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
  //===--- Asm String Analysis ---===//
  /// Assemble final IR asm string.
  std::string generateAsmString(const ASTContext &C) const;
  //===--- Output operands ---===//
  unsigned getNumOutputs() const { return NumOutputs; }
  /// getOutputConstraint - Return the constraint string for the specified
  /// output operand.  All output constraints are known to be non-empty (either
  /// '=' or '+').
  StringRef getOutputConstraint(unsigned i) const;
  /// isOutputPlusConstraint - Return true if the specified output constraint
  /// is a "+" constraint (which is both an input and an output) or false if it
  /// is an "=" constraint (just an output).
  bool isOutputPlusConstraint(unsigned i) const {
    return getOutputConstraint(i)[0] == '+';
  }
  const Expr *getOutputExpr(unsigned i) const;
  /// getNumPlusOperands - Return the number of output operands that have a "+"
  /// constraint.
  unsigned getNumPlusOperands() const;
  //===--- Input operands ---===//
  unsigned getNumInputs() const { return NumInputs; }
  /// getInputConstraint - Return the specified input constraint.  Unlike output
  /// constraints, these can be empty.
  StringRef getInputConstraint(unsigned i) const;
  const Expr *getInputExpr(unsigned i) const;
  //===--- Other ---===//
  unsigned getNumClobbers() const { return NumClobbers; }
  StringRef getClobber(unsigned i) const;
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == GCCAsmStmtClass ||
           T->getStmtClass() == MSAsmStmtClass;
  }
  // Input expr iterators. Inputs occupy Exprs[NumOutputs..NumOutputs+NumInputs).
  using inputs_iterator = ExprIterator;
  using const_inputs_iterator = ConstExprIterator;
  using inputs_range = llvm::iterator_range<inputs_iterator>;
  using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
  inputs_iterator begin_inputs() {
    return &Exprs[0] + NumOutputs;
  }
  inputs_iterator end_inputs() {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
  const_inputs_iterator begin_inputs() const {
    return &Exprs[0] + NumOutputs;
  }
  const_inputs_iterator end_inputs() const {
    return &Exprs[0] + NumOutputs + NumInputs;
  }
  inputs_const_range inputs() const {
    return inputs_const_range(begin_inputs(), end_inputs());
  }
  // Output expr iterators. Outputs occupy Exprs[0..NumOutputs).
  using outputs_iterator = ExprIterator;
  using const_outputs_iterator = ConstExprIterator;
  using outputs_range = llvm::iterator_range<outputs_iterator>;
  using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
  outputs_iterator begin_outputs() {
    return &Exprs[0];
  }
  outputs_iterator end_outputs() {
    return &Exprs[0] + NumOutputs;
  }
  outputs_range outputs() {
    return outputs_range(begin_outputs(), end_outputs());
  }
  const_outputs_iterator begin_outputs() const {
    return &Exprs[0];
  }
  const_outputs_iterator end_outputs() const {
    return &Exprs[0] + NumOutputs;
  }
  outputs_const_range outputs() const {
    return outputs_const_range(begin_outputs(), end_outputs());
  }
  child_range children() {
    return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
  const_child_range children() const {
    return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
  }
};
/// This represents a GCC inline-assembly statement extension.
///
/// The Names and Constraints arrays are indexed with outputs first, then
/// inputs (see getInputIdentifier / getInputConstraintLiteral, which add
/// NumOutputs); Names additionally holds the label identifiers after the
/// inputs (see getLabelIdentifier).
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
// Number of "asm goto" label operands; zero for a plain asm statement.
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
// Only meaningful when MyKind == Operand (see getOperandNo's assert).
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
// Inputs are stored after the outputs in Names/Constraints, hence the
// "+ NumOutputs" offsets below.
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
// Labels are stored after the outputs and inputs in Names and Exprs.
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
// Used by ASTStmtReader (a friend) to populate a deserialized statement.
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
///
/// Unlike GCCAsmStmt, the asm string and constraints are stored as plain
/// StringRefs (not StringLiterals), and the statement records the raw
/// token stream (AsmToks) of the asm block.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
// Outputs first, then inputs (see getInputConstraint's "+ NumOutputs").
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
// True for the braced form (__asm { ... }); LBraceLoc is invalid otherwise.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
// Used by ASTStmtReader (a friend) to populate a deserialized statement.
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Windows structured exception handling __except block,
/// holding its filter expression and handler block as the two children.
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
// Children[FILTER_EXPR] is the filter expression, Children[BLOCK] the body.
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
/// Represents a Windows structured exception handling __finally block;
/// its single child is the handler's compound statement.
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
/// Represents a Windows structured exception handling __try statement;
/// children are the try block and its handler (__except or __finally).
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
// Children[TRY] is the guarded block, Children[HANDLER] the handler.
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns 0 if not defined
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
///
/// A leaf statement: it stores only its source location and has no
/// children (see the empty child_range below).
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
///
/// Storage layout: the object is followed in memory by NumCaptures capture
/// initializer expressions, then the captured statement itself (see
/// getStoredStmts / getCapturedStmt), and then the Capture records
/// (see getStoredCaptures).
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
// Packs the captured variable (null for 'this'/VLA-type captures)
// together with the capture kind.
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variables captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the outlined function declaration and the
/// int part is the captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
// Trailing storage: capture-init expressions, then the captured statement.
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// True if this variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
// Source range delegates to the captured statement itself.
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
GB_unop__identity_fp32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp32_fc32
// op(A') function: GB_unop_tran__identity_fp32_fc32
// C type: float
// A type: GxB_FC32_t
// cast: float cij = (float) crealf (aij)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) crealf (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) crealf (aij) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (float) crealf (Ax [p]) for all entries; Cx and Ax may alias.
   Auto-generated kernel (see file header): edit the Generator/ template,
   not this file. */
GrB_Info GB_unop_apply__identity_fp32_fc32
(
float *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
/* this kernel was compiled out via GxB_NO_* (see GB_DISABLE above) */
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
/* A is not bitmap: every one of the anz entries is present */
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
float z = (float) crealf (aij) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
/* Ab [p] == 0 means entry p is not present in the bitmap */
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
float z = (float) crealf (aij) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = (float) crealf (A'): transpose with typecast.  The whole algorithm
   lives in the shared template GB_unop_transpose.c, which is specialized
   here by the GB_* macros defined above. */
GrB_Info GB_unop_tran__identity_fp32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
/* this kernel was compiled out via GxB_NO_* (see GB_DISABLE above) */
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_single_nowait.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/*
* This test will hang if the nowait is not working properly
*
* It relies on a one thread skipping to the last single construct to
* release the threads in the first three single constructs
*/
/* Set to 1 by release_and_increment() to unblock the spin-waits below.
   volatile forces a re-read each loop iteration; NOTE(review): volatile is
   not a synchronization primitive — this test deliberately relies on the
   final single region to release the earlier nowait regions. */
volatile int release;
/* Count of single-region bodies executed; incremented under omp atomic. */
volatile int count;
/* Spin until another thread sets `release`, then atomically bump `count`.
   Runs inside a "single nowait" region: the executing thread blocks here,
   and only nowait lets its team mates proceed to later regions. */
void wait_for_release_then_increment(int rank)
{
fprintf(stderr, "Thread nr %d enters first section"
" and waits.\n", rank);
/* busy-wait; THREAD_SCHED_POINT() (omp_testsuite.h) yields the CPU */
while (release == 0)
THREAD_SCHED_POINT();
#pragma omp atomic
count++;
}
/* Unblock all threads spinning in wait_for_release_then_increment(), then
   count this region's execution as well. */
void release_and_increment(int rank)
{
fprintf(stderr, "Thread nr %d sets release to 1\n", rank);
release = 1;
#pragma omp atomic
count++;
}
/* One test round: four single regions in a 4-thread team.  The first three
   are nowait and block until released; a thread skipping past them (only
   possible if nowait works) executes the final blocking single, which sets
   `release`.  Returns 1 iff all four single bodies ran exactly once.
   If nowait is broken, the team deadlocks in the first region. */
int test_omp_single_nowait()
{
release = 0;
count = 0;
#pragma omp parallel num_threads(4)
{
int rank;
rank = omp_get_thread_num ();
#pragma omp single nowait
{
wait_for_release_then_increment(rank);
}
#pragma omp single nowait
{
wait_for_release_then_increment(rank);
}
#pragma omp single nowait
{
wait_for_release_then_increment(rank);
}
/* last region has an implicit barrier and performs the release */
#pragma omp single
{
release_and_increment(rank);
}
}
// Check to make sure all four singles were executed
return (count==4);
} /* end of check_single_nowait*/
/* Run the single-nowait test REPETITIONS times; the exit status is the
   number of failed rounds (0 == success). */
int main(void)
{
int failures = 0;
for (int rep = 0; rep < REPETITIONS; rep++) {
if (!test_omp_single_nowait())
failures++;
}
return failures;
}
|
md5_bmark.c | /*
* MD5 Benchmark
* -------------
* File: md5_bmark.c
*
* This is the main file for the md5 benchmark kernel. This benchmark was
* written as part of the StarBENCH benchmark suite at TU Berlin. It performs
* MD5 computation on a number of self-generated input buffers in parallel,
* automatically measuring execution time.
*
* Copyright (C) 2011 Michael Andersch
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include <omp.h>
#include "md5.h"
#include "md5_bmark.h"
typedef struct timeval timer;
/* TIME(x): stamp `x` with the current wall-clock time */
#define TIME(x) gettimeofday(&x, NULL)
/* Function declarations */
int initialize(md5bench_t* args);
int finalize(md5bench_t* args);
void run(md5bench_t* args);
void process(uint8_t* in, uint8_t* out, int bufsize);
void listInputs();
long timediff(timer* starttime, timer* finishtime);
// Input configurations
/* Each entry: { number of buffers, bytes per buffer, rand() seed used by
   initialize() to generate the buffer contents reproducibly }. */
static data_t datasets[] = {
{64, 512, 0},
{64, 1024, 0},
{64, 2048, 0},
{64, 4096, 0},
{128, 1024*512, 1},
{128, 1024*1024, 1},
{128, 1024*2048, 1},
{128, 1024*4096, 1},
};
/*
* Function: initialize
* --------------------
* To initialize the benchmark parameters. Generates the input buffers from random data.
*/
/*
 * Function: initialize
 * --------------------
 * Fill *args from the selected dataset: allocate the array of input
 * buffers and the output digest area, then fill every input buffer with
 * reproducible pseudo-random bytes.  Returns 0 on success, -1 on any
 * allocation failure (already-allocated memory is released by finalize()).
 */
int initialize(md5bench_t* args) {
    int set = args->input_set;
    /* clamp an out-of-range dataset index to 0 rather than failing */
    if (set < 0 || set >= (int)(sizeof(datasets) / sizeof(datasets[0]))) {
        fprintf(stderr, "Invalid input set specified! Clamping to set 0\n");
        set = 0;
    }
    args->numinputs = datasets[set].numbufs;
    args->size = datasets[set].bufsize;
    args->inputs = calloc(args->numinputs, sizeof(uint8_t*));
    args->out = calloc(args->numinputs, DIGEST_SIZE);
    if (args->inputs == NULL || args->out == NULL) {
        fprintf(stderr, "Memory Allocation Error\n");
        return -1;
    }
    /* fixed per-dataset seed so the generated inputs are reproducible */
    srand(datasets[set].rseed);
    for (int buf = 0; buf < args->numinputs; buf++) {
        uint8_t* data = malloc(sizeof(uint8_t) * datasets[set].bufsize);
        args->inputs[buf] = data;
        if (data == NULL) {
            fprintf(stderr, "Memory Allocation Error\n");
            return -1;
        }
        for (int pos = 0; pos < datasets[set].bufsize; pos++)
            data[pos] = rand() % 255;
    }
    return 0;
}
/*
* Function: process
* -----------------
* Processes one input buffer, delivering the digest into out.
*/
/*
 * Function: process
 * -----------------
 * Compute the MD5 digest of one input buffer of `bufsize` bytes and write
 * the DIGEST_SIZE-byte result into `out`.
 *
 * Fix: the previous version spawned MD5_Init / MD5_Update / MD5_Final as
 * three independent OpenMP tasks inside a parallel region.  Those calls
 * form a strict sequential dependency chain on `context` (and `digest`),
 * so running them as unordered tasks is a data race that can yield a wrong
 * digest.  Hashing a single buffer is inherently serial; parallelism across
 * buffers/iterations is already provided by run().
 */
void process(uint8_t* in, uint8_t* out, int bufsize) {
    MD5_CTX context;
    uint8_t digest[16];
    MD5_Init(&context);
    MD5_Update(&context, in, bufsize);
    MD5_Final(digest, &context);
    memcpy(out, digest, DIGEST_SIZE);
}
/*
* Function: run
* --------------------
* Main benchmarking function. If called, processes buffers with MD5
* until no more buffers available. The resulting message digests
* are written into consecutive locations in the preallocated output
* buffer.
*/
/*
 * Function: run
 * --------------------
 * Main benchmarking function. If called, processes buffers with MD5
 * until no more buffers available. The resulting message digests
 * are written into consecutive locations in the preallocated output
 * buffer.  One task is spawned per iteration; each task hashes every
 * input buffer in sequence.
 */
void run(md5bench_t* args) {
#pragma omp parallel
{
/* one thread creates the tasks; nowait lets the others start stealing */
#pragma omp single nowait
{
for(int i = 0; i < args->iterations; i++)
{
/* NOTE(review): every iteration task writes the same out[] locations
   concurrently; this is only benign if all tasks produce identical
   digests — confirm that is the intent. */
#pragma omp task
{
int buffers_to_process = args->numinputs;
int next = 0;
uint8_t** in = args->inputs;
uint8_t* out = args->out;
while(buffers_to_process > 0)
{
process(in[next], out+next*DIGEST_SIZE, args->size);
next++;
buffers_to_process--;
}
}
}
}
}
}
/*
* Function: finalize
* ------------------
* Cleans up memory used by the benchmark for input and output buffers.
*/
/*
 * Function: finalize
 * ------------------
 * Print the hex digest of every output buffer, then free all memory that
 * initialize() allocated for inputs and outputs.  Returns 0.
 *
 * Fix: the old per-nibble printing wrote the LOW nibble first and then
 * formatted `byte & 0xf0` (a value up to 0xf0, i.e. up to two hex digits)
 * into the very next slot, so adjacent digits overwrote each other and the
 * printed checksum was garbled.  "%02x" prints each byte correctly
 * (high nibble first).  Also: the DEBUG path no longer writes the
 * terminating NUL byte to stdout, and the redundant NULL guards before
 * free() were dropped (free(NULL) is a no-op).
 */
int finalize(md5bench_t* args) {
    char buffer[64];
    for (int i = 0; i < args->numinputs; i++) {
#ifdef DEBUG
        sprintf(buffer, "Buffer %d has checksum ", i);
        fwrite(buffer, sizeof(char), strlen(buffer), stdout);
#endif
        /* render the i-th digest as 2*DIGEST_SIZE hex characters */
        for (int j = 0; j < DIGEST_SIZE * 2; j += 2)
            sprintf(buffer + j, "%02x", args->out[DIGEST_SIZE * i + j / 2]);
        buffer[DIGEST_SIZE * 2] = '\0';
#ifdef DEBUG
        fwrite(buffer, sizeof(char), DIGEST_SIZE * 2, stdout);
        fputc('\n', stdout);
#else
        printf("%s ", buffer);
#endif
    }
#ifndef DEBUG
    printf("\n");
#endif
    if (args->inputs) {
        for (int i = 0; i < args->numinputs; i++)
            free(args->inputs[i]);
        free(args->inputs);
    }
    free(args->out);
    return 0;
}
/*
* Function: timediff
* ------------------
* Compute the difference between timers starttime and finishtime in msecs.
*/
/*
 * Function: timediff
 * ------------------
 * Return finishtime - starttime, truncated to whole milliseconds.
 */
long timediff(timer* starttime, timer* finishtime)
{
    long sec_ms = (finishtime->tv_sec - starttime->tv_sec) * 1000;
    long usec_ms = (finishtime->tv_usec - starttime->tv_usec) / 1000;
    return sec_ms + usec_ms;
}
/** MAIN **/
/** MAIN
 *
 * Reads three integers from stdin — thread count, input-set index, and
 * iteration count — runs the benchmark, and prints the elapsed time in
 * seconds.  Fix: the scanf() results were previously unchecked, so
 * malformed or missing input silently ran the benchmark on uninitialized
 * parameters; now it fails loudly.
 */
int main(int argc, char** argv) {
    timer b_start, b_end;
    md5bench_t args;
    int nt; /* number of OpenMP threads */
    if (scanf("%d", &nt) != 1 ||
        scanf("%d", &args.input_set) != 1 ||
        scanf("%d", &args.iterations) != 1) {
        fprintf(stderr, "Expected three integers on stdin: "
                        "num_threads input_set iterations\n");
        exit(EXIT_FAILURE);
    }
    args.outflag = 1;
    omp_set_num_threads(nt);
    // Parameter initialization
    if (initialize(&args)) {
        fprintf(stderr, "Initialization Error\n");
        exit(EXIT_FAILURE);
    }
    TIME(b_start);
    run(&args);
    TIME(b_end);
    // Free memory
    if (finalize(&args)) {
        fprintf(stderr, "Finalization Error\n");
        exit(EXIT_FAILURE);
    }
    /* report elapsed wall-clock time in seconds (ms resolution) */
    double b_time = (double)timediff(&b_start, &b_end) / 1000;
    printf("%.3f\n", b_time);
    return 0;
}
/*
# Results
## System Info
Operating System: 3.19.0-58-generic NAME="Ubuntu" VERSION="14.04.4 LTS,
Trusty Tahr" ID=ubuntu ID_LIKE=debian
PRETTY_NAME="Ubuntu 14.04.4 LTS" VERSION_ID="14.04"
CPU Name: 4th generation Intel(R) Core(TM) Processor family
Frequency: 2.4 GHz
Logical CPU Count: 8
## Sequential Program Version
Elapsed Time: 11.157s
Clockticks: 36,426,054,639
Instructions Retired: 63,922,095,883
CPI Rate: 0.570
MUX Reliability: 0.974
Front-End Bound: 6.4%
Bad Speculation: 0.4%
Back-End Bound: 45.4%
Memory Bound: 1.9%
L1 Bound: 0.028
L3 Bound:
Contested Accesses: 0.000
Data Sharing: 0.000
LLC Hit: 0.000
SQ Full: 0.000
DRAM Bound:
Memory Latency:
LLC Miss: 0.000
Store Bound: 0.000
Core Bound: 43.5%
Divider: 0.000
Port Utilization: 0.693
Cycles of 0 Ports Utilized: 0.034
Cycles of 1 Port Utilized: 0.451
Cycles of 2 Ports Utilized: 0.237
Cycles of 3+ Ports Utilized: 0.224
Retiring: 47.8%
Total Thread Count: 1
Paused Time: 0s
## Parallel Program Version
Elapsed Time: 4.853s
Clockticks: 36,484,054,726
Instructions Retired: 63,944,095,916
CPI Rate: 0.571
MUX Reliability: 0.926
Front-End Bound: 3.4%
Bad Speculation: 0.4%
Back-End Bound: 46.3%
Memory Bound: 1.5%
L1 Bound: 0.024
L3 Bound:
Contested Accesses: 0.000
Data Sharing: 0.000
LLC Hit: 0.000
SQ Full: 0.000
DRAM Bound:
Memory Latency:
LLC Miss: 0.009
Store Bound: 0.001
Core Bound: 44.7%
Divider: 0.000
Port Utilization: 0.735
Cycles of 0 Ports Utilized: 0.025
Cycles of 1 Port Utilized: 0.475
Cycles of 2 Ports Utilized: 0.259
Cycles of 3+ Ports Utilized: 0.218
Retiring: 50.0%
Total Thread Count: 4
Paused Time: 0s
*/
|
GB_unop__identity_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_uint32)
// op(A') function: GB (_unop_tran__identity_bool_uint32)
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op elementwise with typecast:
// Cx [p] = (bool) Ax [p], for all anz entries.
// Handles both the full case (Ab == NULL: every entry present) and the
// bitmap case (Ab [p] != 0 marks which entries exist).
GrB_Info GB (_unop_apply__identity_bool_uint32)
(
bool *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries in Ax (or bitmap size)
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator compiled out via GB_DISABLE; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast. The loop itself lives in the
// shared template GB_unop_transpose.c, which expands the GB_CAST_OP macro
// defined above; Workspaces/A_slice presumably partition A's entries across
// the nthreads tasks — see the template for the exact scheme.
GrB_Info GB (_unop_tran__identity_bool_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_DISABLE; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mat_mul_simd_10000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
// Dense 10000x10000 integer matrix product with hard-coded dimensions
// (sizes presumably come from size.h's generator).
// NOTE(review): b is indexed as b[j*10000+k], i.e. as if transposed, and the
// innermost t-loop re-adds the same a*b product 100 times, scaling each
// element by 100. This looks like auto-generated benchmark code — confirm
// against the harness before reusing as a real matmul.
// NOTE(review): `#pragma omp simd private(j, t, k)` requests vectorization of
// the outermost loop of a 4-deep nest; a `parallel for` may have been the
// intent — verify with the benchmark generator.
void mat_mul(int *a, int *b, int *c)
{
int i, j, k, t;
#pragma omp simd private(j, t, k)
for(i = 0; i <= 9999; i += 1)
for(j = 0; j <= 9999; j += 1) {
c[i*10000+j] = 0;
for(k = 0; k <= 9999; k += 1)
for(t = 0; t <= 99; t += 1)
c[i*10000+j] += a[i*10000+k]*b[j*10000+k];
}
return;
}
|
DRB045-doall1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Simplest one dimension array computation
*/
int a[100];
// Two back-to-back offload regions over the same array: the first fills
// a[i] = i on the device, the second increments every element; the host
// then prints the final values (0+1 .. 99+1).
int main()
{
#pragma omp target data map(tofrom:a[0:100])
#pragma omp target parallel for
    for (int idx = 0; idx < 100; idx++) {
        a[idx] = idx;
    }
#pragma omp target data map(tofrom:a[0:100])
#pragma omp target parallel for
    for (int idx = 0; idx < 100; idx++) {
        a[idx] = a[idx] + 1;
    }
    for (int idx = 0; idx < 100; idx++) {
        printf("%d\n", a[idx]);
    }
    return 0;
}
|
vec2d_mult.c | //////////////////////////////////////////////////////////////
// ____ //
// | __ ) ___ _ __ ___ _ _ _ __ ___ _ __ _ __ ___ //
// | _ \ / _ \ '_ \/ __| | | | '_ \ / _ \ '__| '_ \ / __| //
// | |_) | __/ | | \__ \ |_| | |_) | __/ | | |_) | (__ //
// |____/ \___|_| |_|___/\__,_| .__/ \___|_| | .__/ \___| //
// |_| |_| //
//////////////////////////////////////////////////////////////
// //
// BenLib, 2021 //
// Created: 17, March, 2021 //
// Modified: 17, March, 2021 //
// file: OpenCL_test.cpp //
// Crypto //
// Source: https://github.com/Kaixhin/cuda-workshop //
// https://forums.developer.nvidia.com/t/double-pointer-allocation/9390 //
// https://stackoverflow.com/a/31382775/10152334 //
// CPU: ALL //
// //
//////////////////////////////////////////////////////////////
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "kernel.h"
const int TILE_WIDTH = 16;
#define THREADS_PER_BLOCK 1024
void matrixMultiplyCPU(float *a, float *b, float *c, size_t width);
// Serial reference implementation of c = a * b for square row-major
// matrices of width*width floats.
void matrixMultiplyCPU(float *a, float *b, float *c, size_t width)
{
    for (size_t i = 0; i < width; ++i) {
        for (size_t j = 0; j < width; ++j) {
            float acc = 0;
            for (size_t p = 0; p < width; ++p)
                acc += a[i * width + p] * b[p * width + j];
            c[i * width + j] = acc;
        }
    }
}
void matrixMultiplyCPU_MP(float *a, float *b, float *c, size_t width);
// OpenMP-parallel c = a * b for square row-major matrices. The two outer
// loops are collapsed into one parallel iteration space; `result` is
// declared private so each thread keeps its own accumulator.
void matrixMultiplyCPU_MP(float *a, float *b, float *c, size_t width)
{
    float result = 0.0;
    #pragma omp parallel for collapse(2) private(result)
    for (size_t row = 0; row < width; row++) {
        for (size_t col = 0; col < width; col++) {
            result = 0;
            size_t k = 0;
            while (k < width) {
                result += a[row * width + k] * b[k * width + col];
                ++k;
            }
            c[row * width + col] = result;
        }
    }
}
// Host driver: multiplies two width x width matrices on the GPU (simple and
// optimised kernels from kernel.h) and on the CPU (OpenMP), timing all three
// with CUDA events and cross-checking results element-by-element.
int main()
{
size_t width = 2000; // Define width of square matrix
// Initialise grid and block variables
int sqrtThreads = sqrt(THREADS_PER_BLOCK);
size_t nBlocks = width / sqrtThreads;
if (width % sqrtThreads != 0) { // Add an extra block if necessary
nBlocks++;
}
int i_mult = 1;
// dim3 grid(nBlocks, nBlocks, i_mult);
// NOTE(review): braced init narrows size_t/int into dim3's unsigned fields;
// some compilers reject the narrowing — confirm this builds with project flags.
dim3 grid = {nBlocks, nBlocks, i_mult};
// dim3 block(sqrtThreads, sqrtThreads, i_mult); // Max number of threads per block
dim3 block = {sqrtThreads, sqrtThreads, i_mult};
// Initialise host pointers (dynamically allocated memory) and device pointers
float *a_h;
float *b_h;
float *c_h; // GPU results
float *d_h; // CPU results
float *a_d;
float *b_d;
float *c_d;
size_t size; // Number of bytes required by arrays
// Create timer
cudaEvent_t start;
cudaEvent_t stop;
float elapsed1, elapsed2, elapsed3;
// Print out information about blocks and threads
printf("Number of threads: %i (%ix%i)\n", block.x * block.y, block.x, block.y);
printf("Number of blocks: %i (%ix%i)\n", grid.x * grid.y, grid.x, grid.y);
// Dynamically allocate host memory (no NULL checks — assumes allocation succeeds)
size = width * width * sizeof(float);
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
c_h = (float *)malloc(size);
d_h = (float *)malloc(size);
// Load host arrays with data: every element of row i equals i, in both inputs
for (size_t i = 0; i < width; i++) {
for (size_t j = 0; j < width; j++) {
a_h[i * width + j] = i;
b_h[i * width + j] = i;
}
}
// Allocate device memory
cudaMalloc((void **)&a_d, size);
cudaMalloc((void **)&b_d, size);
cudaMalloc((void **)&c_d, size);
// Copy host memory to device memory
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
// NOTE(review): c_h is uninitialized here, so this copies garbage; the kernel
// overwrites c_d anyway, making the copy look redundant — verify and drop.
cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);
// Start timer for GPU
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch kernel (wrapper from kernel.h, not a direct <<<>>> launch)
// matrixMultiplySimple<<<grid, block>>>(a_d, b_d, c_d, width);
matrixMultiplySimple(grid, block, a_d, b_d, c_d, width);
// Stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed1, start, stop);
// Print execution time
printf("Time to calculate results on GPU: %f ms\n", elapsed1);
// Copy results to host
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
// Start timer for CPU (CUDA events time host code here as wall-clock)
cudaEventRecord(start, 0);
// Launch CPU code
matrixMultiplyCPU_MP(a_h, b_h, d_h, width);
// Stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed2, start, stop);
// Print execution time
printf("Time to calculate results on CPU: %f ms\n", elapsed2);
// Compare results (exact float equality; inputs are small integers so the
// products are exactly representable)
for (size_t i = 0; i < width * width; i++) {
if (c_h[i] != d_h[i]) {
printf("Error: CPU and GPU results do not match\n");
break;
}
}
// Start timer for GPU (optimised)
cudaEventRecord(start, 0);
// Launch kernel (optimised)
// matrixMultiplyOptimised<<<grid, block>>>(a_h, b_h, c_h, width);
matrixMultiplyOptimised(grid, block, a_d, b_d, c_d, width);
// Stop timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed3, start, stop);
// Print execution time
printf("Time to calculate results on GPU (optimised): %f ms\n", elapsed3);
// Copy results to host
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
// Compare results
for (size_t i = 0; i < width * width; i++) {
if (c_h[i] != d_h[i]) {
printf("Error: CPU and GPU (optimised) results do not match\n");
break;
}
}
// Free memory
free(a_h);
free(b_h);
free(c_h);
free(d_h);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
master.c | //////////////////////////////////////////////////////////////
//
// master.c
//
// Copyright (c) 2017, Hassan Salehe Matar
// All rights reserved.
//
// This file is part of Clanomp. For details, see
// https://github.com/hassansalehe/Clanomp. Please also
// see the LICENSE file for additional BSD notice
//
// Redistribution and use in source and binary forms, with
// or without modification, are permitted provided that
// the following conditions are met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the
// following disclaimer.
//
// * Redistributions in binary form must reproduce the
// above copyright notice, this list of conditions and
// the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names
// of its contributors may be used to endorse or promote
// products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////
// This program includes the "master" construct.
// The master thread prints its own thread id.
//
// References:
// 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf
// 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
#include <stdio.h>
#include <omp.h>
int main() {
    // Only the master thread (thread 0) executes the `master` block, so
    // count is incremented exactly once regardless of the team size.
    int count = 0;
#pragma omp parallel shared(count)
    {
#pragma omp master
        count++;
    }
    printf("Value of count: %d, construct: <master>\n", count);
    return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract `struct timeval' values: *result = *x - *y.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place as a side effect of the carry steps. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Carry so that 0 <= x->tv_usec - y->tv_usec after the adjustment. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Borrow in the other direction when the usec gap exceeds a second. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
// Driver for the order-4 (25-point) 3D wave-equation stencil: allocates the
// two time planes A[0]/A[1] and the coefficient field roc2, runs TESTS timed
// sweeps of Nt time steps, and reports the best wall-clock time.
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    // Grid sizes include an 8-cell halo (4 on each face) for the order-4
    // stencil. Defaults keep a no-argument run well-defined: the original
    // left Nx..Nt uninitialized in that case (undefined behavior).
    int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    // Jagged Nz x Ny x Nx arrays; A has two time planes.
    double ****A = (double ****) malloc(sizeof(double***)*2);
    // FIX: the original allocated a 1-element block into roc2 and then
    // immediately overwrote the pointer with the Nz-sized allocation,
    // leaking the first block. Allocate once, with the right size.
    double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 4;
    tile_size[3] = 1024;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables
    // FIX: start at 0 instead of 1 — the stencil reads indices down to 0
    // (i-4 with i == 4), which the original left uninitialized; also
    // initialize the A[1] plane, which is read on the first time step.
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    // Order-4 central-difference coefficients
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i ][j ][k ] +
                            coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                                   A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                                   A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                            coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                                   A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                                   A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                            coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                                   A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                                   A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                            coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                                   A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                                   A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    // FIX: the original leaked the outer A block and tile_size
    free(A);
    free(tile_size);
    return 0;
}
|
atomic-13.c | /* PR middle-end/45423 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-gimple -g0 -O2 -Wno-deprecated" } */
/* atomicvar should never be referenced in between the barrier and
following #pragma omp atomic_load. */
/* { dg-final { scan-tree-dump-not "barrier\[^#\]*atomicvar" "gimple" } } */
/* { dg-skip-if "invalid in C++17" { c++17 } } */
#include "atomic-12.c"
|
mkldnn_graph.h | // Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <map>
#include <string>
#include <vector>
#include <memory>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include "ie_parallel.hpp"
#include "mkldnn_memory.h"
#include "config.h"
#include "perf_count.h"
#include "mkldnn_dims.h"
#include "mean_image.h"
#include "mkldnn_node.h"
#include "mkldnn_edge.h"
#include "mkldnn_extension_utils.h"
#include "mkldnn_streams.h"
namespace MKLDNNPlugin {
// Executable graph of MKLDNN nodes and edges built from an
// InferenceEngine::ICNNNetwork. Owns the node/edge containers, per-input
// mean images, and the mkldnn CPU engine, and drives an inference pass
// (PushInputData -> Infer -> PullOutputData).
class MKLDNNGraph {
public:
typedef std::shared_ptr<MKLDNNGraph> Ptr;
// Lifecycle state: the graph is usable only once it reaches Ready.
enum Status {
NotReady = 0,
Ready = 1,
};
MKLDNNGraph(): status(NotReady), eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)) {}
Status GetStatus() {
return status;
}
bool IsReady() {
return (GetStatus() == Ready);
}
void setConfig(const Config &cfg);
void setProperty(const std::map<std::string, std::string> &properties);
Config getProperty();
void getInputBlobs(InferenceEngine::BlobMap &in_map);
void getOutputBlobs(InferenceEngine::BlobMap &out_map);
// Builds the internal node/edge representation from the network; extMgr
// resolves custom (extension) layer implementations.
void CreateGraph(const InferenceEngine::ICNNNetwork &network, const MKLDNNExtensionManager::Ptr& extMgr);
bool hasMeanImageFor(const std::string& name) {
return _meanImages.find(name) != _meanImages.end();
}
void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in);
void PullOutputData(InferenceEngine::BlobMap &out);
// Runs one inference pass; batch == -1 presumably means the full batch —
// confirm against the .cpp implementation.
void Infer(int batch = -1);
std::vector<MKLDNNNodePtr>& GetNodes() {
return graphNodes;
}
std::vector<MKLDNNEdgePtr>& GetEdges() {
return graphEdges;
}
std::vector<MKLDNNNodePtr>& GetOutputNodes() {
return outputNodes;
}
mkldnn::engine getEngine() const {
return eng;
}
void GetPerfData(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> &perfMap) const;
void RemoveDroppedNodes();
void RemoveDroppedEdges();
void DropNode(const MKLDNNNodePtr& node);
// Per-stream threading setup: with OpenMP just caps the thread count; with
// TBB creates a dedicated task arena of threads_per_stream threads.
void CreateArena(int threads_per_stream) {
#if IE_THREAD == IE_THREAD_OMP
omp_set_num_threads(threads_per_stream);
#elif IE_THREAD == IE_THREAD_TBB
ptrArena = std::unique_ptr<tbb::task_arena>(new tbb::task_arena(threads_per_stream));
#endif
}
// Pins this stream's worker threads to cores: TBB uses a scheduler
// observer attached to the arena; OMP/SEQ pin directly using the process
// affinity mask obtained from get_process_mask.
void CreateObserver(int _stream_id, int _threads_per_stream, int _pinning_step = 1) {
#if IE_THREAD == IE_THREAD_TBB
ptrObserver
= std::unique_ptr<tbb::task_scheduler_observer>(
new pinning_observer(*ptrArena.get(), _stream_id, _threads_per_stream, _pinning_step));
#else
cpu_set_t *process_mask = nullptr;
int ncpus = 0;
get_process_mask(ncpus, process_mask);
#if IE_THREAD == IE_THREAD_OMP
#pragma omp parallel for
for (int thread_index = 0; thread_index < _threads_per_stream; thread_index++) {
pin_thread_to_vacant_core(_stream_id * _threads_per_stream + thread_index, 1, ncpus, process_mask);
}
#elif IE_THREAD == IE_THREAD_SEQ
pin_thread_to_vacant_core(_stream_id * _threads_per_stream, 1, ncpus, process_mask);
#endif
CPU_FREE(process_mask);
#endif
}
InferenceEngine::ICNNNetwork::Ptr dump() const;
protected:
void VisitNode(MKLDNNNodePtr node, std::vector<MKLDNNNodePtr>& sortedNodes);
void SortTopologically();
// Drops all built state and returns the graph to NotReady.
void ForgetGraphData() {
status = NotReady;
eng = mkldnn::engine(mkldnn::engine::kind::cpu, 0);
inputNodes.clear();
outputNodes.clear();
graphNodes.clear();
graphEdges.clear();
_meanImages.clear();
}
Status status;
Config config;
MKLDNNMemoryPtr memWorkspace;
std::map<std::string, MKLDNNNodePtr> inputNodes;
std::vector<MKLDNNNodePtr> outputNodes;
std::vector<MKLDNNNodePtr> graphNodes;
std::vector<MKLDNNEdgePtr> graphEdges;
// mean image per input name, applied during PushInputData (see .cpp)
std::map<std::string, MeanImage> _meanImages;
#if IE_THREAD == IE_THREAD_TBB
std::unique_ptr<tbb::task_arena> ptrArena;
std::unique_ptr<tbb::task_scheduler_observer> ptrObserver;
#endif
mkldnn::engine eng;
void Replicate(const ICNNNetwork &network, const MKLDNNExtensionManager::Ptr& extMgr);
void InitGraph();
void InitNodes();
void InitEdges();
void Allocate();
void AllocateWithReuse();
void CreatePrimitives();
void do_before(const std::string &dir, const MKLDNNNodePtr &node);
void do_after(const std::string &dir, const MKLDNNNodePtr &node);
friend class MKLDNNInferRequest;
friend class MKLDNNGraphlessInferRequest;
friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);
private:
void dumpToDotFile(std::string file) const;
struct ParsedLayer {
MKLDNNNodePtr parent;
InferenceEngine::CNNLayerPtr cnnLayer;
size_t outIdx;
};
};
// ExecutableNetwork implementation for the MKLDNN plugin: owns a set of
// MKLDNNGraph instances (one per stream) and creates infer requests that
// execute against them.
class MKLDNNExecNetwork: public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
typedef std::shared_ptr<MKLDNNExecNetwork> Ptr;
InferenceEngine::InferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;
MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network, const Config &cfg,
const MKLDNNExtensionManager::Ptr& extMgr);
~MKLDNNExecNetwork() {
// graphs are released before the extension manager they were built with
graphs.clear();
extensionManager.reset();
}
void setProperty(const std::map<std::string, std::string> &properties);
void GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) override;
protected:
std::vector<MKLDNNGraph::Ptr> graphs;
MKLDNNExtensionManager::Ptr extensionManager;
// Whether the network supports dynamic-batch execution — see .cpp for the
// layer checks involved.
bool CanProcessDynBatch(const InferenceEngine::ICNNNetwork &network) const;
};
} // namespace MKLDNNPlugin
|
a.13.1.c | /* { dg-do compile } */
int dequeue (float *a);
void work (int i, float *a);
// Two differently-named critical sections ("xaxis", "yaxis") serialize the
// dequeue of the next index from each work list; the work() calls run
// outside the critical sections, so they may execute concurrently.
void
a13 (float *x, float *y)
{
  int next_x, next_y;
#pragma omp parallel shared(x, y) private(next_x, next_y)
  {
#pragma omp critical (xaxis)
    next_x = dequeue (x);
    work (next_x, x);
#pragma omp critical (yaxis)
    next_y = dequeue (y);
    work (next_y, y);
  }
}
|
fig4.16-sections.c | /*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
Copyright 2009 Sun Microsystems, Inc. All rights reserved.
The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt
The BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistribution of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistribution in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Sun Microsystems, Inc. or the names of
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
This software is provided "AS IS," without a warranty of any kind. ALL
EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND
ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A
RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES.
IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT
OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY,
ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
#define omp_get_thread_num() 0
#endif
void funcA();
void funcB();
// Demonstrates the sections construct: each section runs exactly once,
// on whichever thread of the team picks it up.
int main()
{
#ifdef _OPENMP
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    (void) omp_set_num_threads(4);
#endif
#pragma omp parallel
    {
#pragma omp sections
        {
#pragma omp section
            funcA();
#pragma omp section
            funcB();
        }
    }
    return 0;
}
void funcA()
{
    // Report which thread executed this section.
    int tid = omp_get_thread_num();
    printf("In funcA: this section is executed by thread %d\n", tid);
}
void funcB()
{
    // Report which thread executed this section.
    int tid = omp_get_thread_num();
    printf("In funcB: this section is executed by thread %d\n", tid);
}
|
GB_unop__identity_fc64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fp32)
// op(A') function: GB (_unop_tran__identity_fc64_fp32)
// C type: GxB_FC64_t
// A type: float
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op elementwise with typecast:
// Cx [p] = GxB_CMPLX ((double) Ax [p], 0), for all anz entries.
// Handles both the full case (Ab == NULL: every entry present) and the
// bitmap case (Ab [p] != 0 marks which entries exist).
GrB_Info GB (_unop_apply__identity_fc64_fp32)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries in Ax (or bitmap size)
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator compiled out via GB_DISABLE; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast. The loop itself lives in the
// shared template GB_unop_transpose.c, which expands the GB_CAST_OP macro
// defined above; Workspaces/A_slice presumably partition A's entries across
// the nthreads tasks — see the template for the exact scheme.
GrB_Info GB (_unop_tran__identity_fc64_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GB_DISABLE; caller falls back to the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_basic.c | /*******************************************************************************
* Copyright 2019 UChicago Argonne, LLC.
* (c.f. AUTHORS, LICENSE)
*
* This file is part of the nrm-extra project.
* For more info, see https://github.com/anlsys/nrm-extra
*
* SPDX-License-Identifier: BSD-3-Clause
*******************************************************************************/
#include <omp.h>
#include <stdio.h>
// Hello-world OpenMP probe: reports the team size and the id of one thread.
int main(int argc, char **argv)
{
	int size, rank;
#pragma omp parallel
	{
		// FIX: the original had every thread write the shared `size`/`rank`
		// concurrently — a data race (undefined behavior). Let exactly one
		// thread record the values instead.
#pragma omp single
		{
			size = omp_get_num_threads();
			rank = omp_get_thread_num();
		}
	}
	// FIX: use %d for the int arguments (the original used %u).
	fprintf(stdout, "Hello, I'm %d of %d\n", rank, size);
	return 0;
}
|
omp_smithW-v4-parallel-serial.c | /*********************************************************************************
* Smith–Waterman algorithm
* Purpose: Local alignment of nucleotide or protein sequences
* Authors: Daniel Holanda, Hanoch Griner, Taynara Pinheiro
*
* Compilation: gcc omp_smithW.c -o omp_smithW -fopenmp -DDEBUG // debugging mode
* gcc omp_smithW.c -O3 -o omp_smithW -fopenmp // production run
* Execution: ./omp_smithW <number_of_col> <number_of_rows>
*
* Updated by C. Liao, Jan 2nd, 2019
*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <time.h>
#include <assert.h>
#include <stdbool.h> // C99 does not support the boolean data type
#include "parameters.h"
/*--------------------------------------------------------------------
* Text Tweaks
*/
#define RESET "\033[0m"
#define BOLDRED "\033[1m\033[31m" /* Bold Red */
/* End of text tweaks */
/*--------------------------------------------------------------------
* Constants
*/
#define PATH -1
#define NONE 0
#define UP 1
#define LEFT 2
#define DIAGONAL 3
/* End of constants */
/*--------------------------------------------------------------------
* Helpers
*/
#define min(x, y) (((x) < (y)) ? (x) : (y))
#define max(a,b) ((a) > (b) ? a : b)
// #define DEBUG
/* End of Helpers */
/*--------------------------------------------------------------------
* Functions Prototypes
*/
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos);
// without omp critical: how to conditionalize it?
void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos);
int matchMissmatchScore(long long int i, long long int j);
void backtrack(int* P, long long int maxPos);
void printMatrix(int* matrix);
void printPredecessorMatrix(int* matrix);
void generate(void);
long long int nElement(long long int i);
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj);
/* End of prototypes */
/*--------------------------------------------------------------------
* Global Variables
*/
bool useBuiltInData=true;
//Defines size of strings to be compared
long long int m = 8 ; //Columns - Size of string a
long long int n = 9; //Lines - Size of string b
// the generated scoring matrix's size is m++ and n++ later to have the first row/column as 0s.
//Defines scores
int matchScore = 3;
int missmatchScore = -3;
int gapScore = -2;
//Strings over the Alphabet Sigma
char *a, *b;
/* End of global variables */
/*--------------------------------------------------------------------
* Function: main
*/
/*--------------------------------------------------------------------
 * Function: main
 * Usage:    ./omp_smithW <number_of_col> <number_of_rows>
 *           (no arguments: runs the built-in Wikipedia example and
 *           verifies the result)
 *
 * Fixes vs. the original:
 *  - nDiag is long long but was printed with %d (undefined behavior);
 *    it now uses %lld.
 *  - the backtracking end timestamp called omp_get_wtime() twice; the
 *    duplicate call is removed.
 *  - malloc/calloc results are now checked before use.
 */
int main(int argc, char* argv[]) {
    int thread_count;   /* OpenMP team size, reported only */
    if (argc == 3)
    {
        m = strtoll(argv[1], NULL, 10);
        n = strtoll(argv[2], NULL, 10);
        useBuiltInData = false;
    }
    if (useBuiltInData)
        printf ("Using built-in data for testing ..\n");
    printf("Problem size: Matrix[%lld][%lld], FACTOR=%d CUTOFF=%d\n", n, m, FACTOR, CUTOFF);
    // Allocates a and b
    a = (char*) malloc(m * sizeof(char));
    b = (char*) malloc(n * sizeof(char));
    if (a == NULL || b == NULL) {
        fprintf(stderr, "Out of memory allocating the input sequences\n");
        return 1;
    }
    // Grow both dimensions by one: row 0 and column 0 are zero padding.
    m++;
    n++;
    // Similarity matrix H and predecessor matrix P (zero-initialized).
    int *H = (int *) calloc(m * n, sizeof(int));
    int *P = (int *) calloc(m * n, sizeof(int));
    if (H == NULL || P == NULL) {
        fprintf(stderr, "Out of memory allocating the scoring matrices\n");
        return 1;
    }
    if (useBuiltInData)
    {
        // https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm#Example
        // Using the wiki example to verify the results
        b[0] = 'G';
        b[1] = 'G';
        b[2] = 'T';
        b[3] = 'T';
        b[4] = 'G';
        b[5] = 'A';
        b[6] = 'C';
        b[7] = 'T';
        b[8] = 'A';
        a[0] = 'T';
        a[1] = 'G';
        a[2] = 'T';
        a[3] = 'T';
        a[4] = 'A';
        a[5] = 'C';
        a[6] = 'G';
        a[7] = 'G';
    }
    else
    {
        // Gen random arrays a and b
        generate();
    }
    // Start position for backtrack
    long long int maxPos = 0;
    long long int i, j;
    // Wavefront sweep over all anti-diagonals: their count for the
    // unpadded sub-matrix is (m-1) + (n-1) - 1.
    long long int nDiag = m + n - 3;
#ifdef DEBUG
    printf("nDiag=%lld\n", nDiag);  // %lld: nDiag is long long
    printf("Number of wavefront lines and their first element positions:\n");
#endif
#pragma omp parallel
    {
#pragma omp master
        {
            thread_count = omp_get_num_threads();
            printf ("Using %d out of max %d threads...", thread_count, omp_get_max_threads());
        }
    }
    // Gets Initial time
    double initialTime = omp_get_wtime();
    for (i = 1; i <= nDiag; ++i) // start from 1 since 0 is the boundary padding
    {
        long long int nEle, si, sj;
        nEle = nElement(i);
        calcFirstDiagElement(i, &si, &sj);
        if (nEle >= CUTOFF)
        {
            // Cells of one anti-diagonal are independent: parallelize.
#pragma omp parallel for private(j) shared (nEle, si, sj, H, P, maxPos)
            for (j = 0; j < nEle; ++j)
            { // going upwards : anti-diagnol direction
                long long int ai = si - j ; // going up vertically
                long long int aj = sj + j;  // going right in horizontal
                similarityScore(ai, aj, H, P, &maxPos); // a critical section is used inside
            }
        }
        else
        { // serial version, totally avoid parallel region creation.
            for (j = 0; j < nEle; ++j)
            {
                long long int ai = si - j ;
                long long int aj = sj + j;
                similarityScore2(ai, aj, H, P, &maxPos); // a specialized version without a critical section used inside
            }
        }
    } // for end nDiag
    double finalTime = omp_get_wtime();
    printf("\nElapsed time for scoring matrix computation: %f\n", finalTime - initialTime);
    // Time the backtracking phase (the end timestamp is taken once).
    initialTime = omp_get_wtime();
    backtrack(P, maxPos);
    finalTime = omp_get_wtime();
    printf("Elapsed time for backtracking: %f\n", finalTime - initialTime);
    if (useBuiltInData)
    {
        // H[n*m-1] is the bottom-right cell of the padded matrix.
        printf ("Verifying results using the builtinIn data: %s\n", (H[n*m-1]==7)?"true":"false");
        assert (H[n*m-1]==7);
    }
#ifdef DEBUG
    printf("\nSimilarity Matrix:\n");
    printMatrix(H);
    printf("\nPredecessor Matrix:\n");
    printPredecessorMatrix(P);
#endif
    // Frees similarity matrixes
    free(H);
    free(P);
    // Frees input arrays
    free(a);
    free(b);
    return 0;
} /* End of main */
/*--------------------------------------------------------------------
* Function: nElement
* Purpose: Calculate the number of i-diagonal's elements
* i value range 1 to nDiag. we inclulde the upper bound value. 0 is for the padded wavefront, which is ignored.
*/
/*--------------------------------------------------------------------
 * Function: nElement
 * Purpose:  Number of elements on anti-diagonal i of the padded scoring
 *           matrix.  Valid i ranges from 1 to nDiag (inclusive); 0 is the
 *           padded wavefront and is ignored.
 *
 * Fix vs. the original: the intermediate minimum was stored in a
 * `long int`, which truncates long long values on LLP64 platforms
 * (Windows, where long is 32-bit); it is now long long throughout.
 */
long long int nElement(long long int i) {
    if (i < m && i < n) { // smaller than both directions
        // Growing phase: the diagonal gains one element per step.
        return i;
    }
    else if (i < max(m, n)) { // smaller than only one direction
        // Stable phase: length is bounded by the shorter dimension,
        // since the longer direction holds the edge elements.
        long long int shorter = min(m, n);
        return shorter - 1;
    }
    else {
        // Shrinking phase near the bottom-right corner.
        long long int shorter = min(m, n);
        return 2 * shorter - i + llabs(m - n) - 2;
    }
}
/*--------------------------------------------------------------------
* Function: calcElement: expect valid i value is from 1 to nDiag. since the first one is 0 padding
* Purpose: Calculate the position of (si, sj)-element
* n rows, m columns: we sweep the matrix on the left edge then bottom edge to get the wavefront
*/
/*--------------------------------------------------------------------
 * Function: calcFirstDiagElement
 * Purpose:  Locate the left-most element (si, sj) of anti-diagonal i.
 *           While the diagonal starts on the left edge we walk down that
 *           edge; afterwards we sweep right along the bottom row.
 *           Expects i in 1..nDiag (index 0 is the zero padding).
 */
void calcFirstDiagElement(long long int i, long long int *si, long long int *sj) {
    if (i >= n) {
        /* Bottom-row sweep: row index is pinned, column advances. */
        *si = n - 1;
        *sj = i - n + 2; /* +1 past the padding, +1 per diagonal beyond n-1 */
    } else {
        /* Left-edge sweep: row equals the diagonal index. */
        *si = i;
        *sj = 1; /* column 0 is the zero padding */
    }
}
/*
// understanding the calculation by an example
n =6 // row
m =2 // col
padded scoring matrix
n=7
m=3
0 1 2
-------
0 x x x
1 x x x
2 x x x
3 x x x
4 x x x
5 x x x
6 x x x
We should peel off top row and left column since they are the padding
the remaining 6x2 sub matrix is what is interesting for us
Now find the number of wavefront lines and their first element's position in the scoring matrix
total diagnol frontwave = (n-1) + (m-1) -1 // submatrix row+column -1
We use the left most element in each wavefront line as its first element.
Then we have the first elements like
(1,1),
(2,1)
(3,1)
..
(6,1) (6,2)
*/
/*--------------------------------------------------------------------
* Function: SimilarityScore
* Purpose: Calculate value of scoring matrix element H(i,j) : the maximum Similarity-Score H(i,j)
* int *P; the predecessor array,storing which of the three elements is picked with max value
*/
/*--------------------------------------------------------------------
 * Function: similarityScore
 * Purpose:  Compute one cell H(i,j) of the scoring matrix and record in
 *           P(i,j) which neighbor produced it.  Safe to call from multiple
 *           threads on the same anti-diagonal, because each cell reads only
 *           cells of earlier diagonals; the shared running maximum
 *           (*maxPos) is updated inside an OpenMP critical section.
 */
void similarityScore(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
    int up, left, diag;
    // Stores index of element (row-major, m columns per row)
    long long int index = m * i + j;
    // Get element above (gap in one sequence)
    up = H[index - m] + gapScore;
    // Get element on the left (gap in the other sequence)
    left = H[index - 1] + gapScore;
    // Get element on the diagonal (match / mismatch)
    diag = H[index - m - 1] + matchMissmatchScore(i, j);
    // Calculates the maximum; NONE (0) is the Smith-Waterman local floor
    int max = NONE;
    int pred = NONE;
    /* === Matrix ===
     *      a[0] ... a[n]
     * b[0]
     * ...
     * b[n]
     *
     * generate 'a' from 'b': '←' means insert and '↑' means remove
     *  a=GAATTCA
     *  b=GACTT-A
     *
     * generate 'b' from 'a': '←' means insert and '↑' means remove
     *  b=GACTT-A
     *  a=GAATTCA
     */
    if (diag > max) { //same letter ↖
        max = diag;
        pred = DIAGONAL;
    }
    if (up > max) { //remove letter ↑
        max = up;
        pred = UP;
    }
    if (left > max) { //insert letter ←
        max = left;
        pred = LEFT;
    }
    // Inserts the value in the similarity and predecessor matrixes
    H[index] = max;
    P[index] = pred;
    // Updates maximum score to be used as seed on backtrack; the critical
    // section covers both the read of H[*maxPos] and the write of *maxPos.
#pragma omp critical
    if (max > H[*maxPos]) {
        *maxPos = index;
    }
} /* End of similarityScore */
void similarityScore2(long long int i, long long int j, int* H, int* P, long long int* maxPos) {
int up, left, diag;
//Stores index of element
long long int index = m * i + j;
//Get element above
up = H[index - m] + gapScore;
//Get element on the left
left = H[index - 1] + gapScore;
//Get element on the diagonal
diag = H[index - m - 1] + matchMissmatchScore(i, j);
//Calculates the maximum
int max = NONE;
int pred = NONE;
/* === Matrix ===
* a[0] ... a[n]
* b[0]
* ...
* b[n]
*
* generate 'a' from 'b', if '←' insert e '↑' remove
* a=GAATTCA
* b=GACTT-A
*
* generate 'b' from 'a', if '←' insert e '↑' remove
* b=GACTT-A
* a=GAATTCA
*/
if (diag > max) { //same letter ↖
max = diag;
pred = DIAGONAL;
}
if (up > max) { //remove letter ↑
max = up;
pred = UP;
}
if (left > max) { //insert letter ←
max = left;
pred = LEFT;
}
//Inserts the value in the similarity and predecessor matrixes
H[index] = max;
P[index] = pred;
//Updates maximum score to be used as seed on backtrack
if (max > H[*maxPos]) {
*maxPos = index;
}
} /* End of similarityScore2 */
/*--------------------------------------------------------------------
* Function: matchMissmatchScore
* Purpose: Similarity function on the alphabet for match/missmatch
*/
/*--------------------------------------------------------------------
 * Function: matchMissmatchScore
 * Purpose:  Substitution score for aligning a[j-1] against b[i-1]; the -1
 *           accounts for the zero-padded first row/column of the matrix.
 */
int matchMissmatchScore(long long int i, long long int j) {
    return (a[j - 1] == b[i - 1]) ? matchScore : missmatchScore;
} /* End of matchMissmatchScore */
/*--------------------------------------------------------------------
* Function: backtrack
* Purpose: Modify matrix to print, path change from value to PATH
*/
/*--------------------------------------------------------------------
 * Function: backtrack
 * Purpose:  Walk the predecessor matrix from maxPos back toward the start
 *           of the local alignment, negating each visited entry
 *           (multiplying by PATH == -1) so the path can be highlighted by
 *           printPredecessorMatrix().
 *
 * Fix vs. the original: predPos was read uninitialized (undefined
 * behavior) when the starting cell's predecessor was already NONE; that
 * case now returns explicitly.
 */
void backtrack(int* P, long long int maxPos) {
    long long int predPos;
    // backtrack from maxPos until a cell with no predecessor is reached
    do {
        switch (P[maxPos]) {
        case DIAGONAL:
            predPos = maxPos - m - 1;
            break;
        case UP:
            predPos = maxPos - m;
            break;
        case LEFT:
            predPos = maxPos - 1;
            break;
        default:
            return; /* NONE: nothing to mark, nowhere to go */
        }
        P[maxPos] *= PATH; /* flip the sign to mark the cell as on-path */
        maxPos = predPos;
    } while (P[maxPos] != NONE);
} /* End of backtrack */
/*--------------------------------------------------------------------
* Function: printMatrix
* Purpose: Print Matrix
*/
/*--------------------------------------------------------------------
 * Function: printMatrix
 * Purpose:  Dump the similarity matrix, tab-separated, with sequence a as
 *           the column header and sequence b labelling the rows.
 */
void printMatrix(int* matrix) {
    long long int row, col;
    /* Header: two placeholder cells, then the characters of a. */
    printf("-\t-\t");
    for (col = 0; col < m - 1; col++) {
        printf("%c\t", a[col]);
    }
    printf("\n-\t");
    for (row = 0; row < n; row++) { //Lines
        for (col = 0; col < m; col++) {
            /* Every row after the first starts with its b character. */
            if (col == 0 && row > 0)
                printf("%c\t", b[row - 1]);
            printf("%d\t", matrix[m * row + col]);
        }
        printf("\n");
    }
} /* End of printMatrix */
/*--------------------------------------------------------------------
* Function: printPredecessorMatrix
* Purpose: Print predecessor matrix
*/
/*--------------------------------------------------------------------
 * Function: printPredecessorMatrix
 * Purpose:  Print the predecessor matrix as arrows.  Cells that backtrack()
 *           negated (the alignment path) are rendered in bold red.
 */
void printPredecessorMatrix(int* matrix) {
    long long int row, col, pos;
    printf("    ");
    for (col = 0; col < m - 1; col++) {
        printf("%c ", a[col]);
    }
    printf("\n  ");
    for (row = 0; row < n; row++) { //Lines
        for (col = 0; col < m; col++) {
            if (col == 0 && row > 0)
                printf("%c ", b[row - 1]);
            pos = m * row + col;
            int dir = matrix[pos];
            int onPath = (dir < 0);
            if (onPath) {
                printf(BOLDRED);
                dir = -dir; /* recover the direction code */
            }
            if (dir == UP)
                printf("↑ ");
            else if (dir == LEFT)
                printf("← ");
            else if (dir == DIAGONAL)
                printf("↖ ");
            else
                printf("- ");
            if (onPath)
                printf(RESET);
        }
        printf("\n");
    }
} /* End of printPredecessorMatrix */
/*--------------------------------------------------------------------
* Function: generate
* Purpose: Generate arrays a and b
*/
/*--------------------------------------------------------------------
 * Function: generate
 * Purpose:  Fill the global sequences a (length m) and b (length n) with
 *           random nucleotides.  The mapping of rand()%4 is the same as
 *           before: 0->'A', 1->'T', 2->'C', 3->'G'.
 */
void generate() {
    static const char nucleotide[4] = { 'A', 'T', 'C', 'G' };
    long long int k;
    //Random seed
    srand(time(NULL));
    //Generates the values of a
    for (k = 0; k < m; k++) {
        a[k] = nucleotide[rand() % 4];
    }
    //Generates the values of b
    for (k = 0; k < n; k++) {
        b[k] = nucleotide[rand() % 4];
    }
} /* End of generate */
/*--------------------------------------------------------------------
* External References:
* http://vlab.amrita.edu/?sub=3&brch=274&sim=1433&cnt=1
* http://pt.slideshare.net/avrilcoghlan/the-smith-waterman-algorithm
* http://baba.sourceforge.net/
*/
|
DOMINOSEC_fmt_plug.c | /*
* DOMINOSEC_fmt.c (version 3)
*
* Notes/Domino More Secure Internet Password module for Solar Designer's JtR
* by regenrecht at o2.pl, Dec 2005.
* Algorithm discovery by regenrecht at o2.pl, bartavelle at bandecon.com.
*
* Short description.
* 1. Make 128bit digest of key. (128/8=16 bytes)
* 2. Do bin2hex() of key digest and put braces around it. (16*2+2=34 bytes)
* 3. Concat output of previous step to 5 bytes of salt. (5+34=39 bytes)
* 4. Make 128bit digest of first 34 bytes (out of 39 bytes). (128/8=16 bytes)
* 5. Compare first 10 bytes (out of 16) to check if the key was correct.
*
* Password file should have form of:
* TomaszJegerman:(GKjXibCW2Ml6juyQHUoP)
* RubasznyJan:(GrixoFHOckC/2CnHrHtM)
*
* Further optimizations (including some code rewrites) by Solar Designer
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_DOMINOSEC;
#elif FMT_REGISTERS_H
john_register_one(&fmt_DOMINOSEC);
#else
#include <ctype.h>
#include <string.h>
//#define DOMINOSEC_32BIT
#ifdef DOMINOSEC_32BIT
#include <stdint.h>
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "dominosec"
#define FORMAT_NAME "Lotus Notes/Domino 6 More Secure Internet Password"
#define ALGORITHM_NAME "8/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 64
#define CIPHERTEXT_LENGTH 22
#define BINARY_SIZE 9 /* oh, well :P */
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE 5
#define SALT_ALIGN sizeof(uint32_t)
#define DIGEST_SIZE 16
#define BINARY_BUFFER_SIZE (DIGEST_SIZE-SALT_SIZE)
#define ASCII_DIGEST_LENGTH (DIGEST_SIZE*2)
#define MIN_KEYS_PER_CRYPT 3
#define MAX_KEYS_PER_CRYPT 6
/* Per-candidate working buffers, allocated in init() and sized to
 * max_keys_per_crypt.  digest34 layout (filled by crypt_all): bytes 0-4
 * salt, byte 5 '(', bytes 6-33 the first 14 digest bytes in hex. */
static unsigned char (*digest34)[34];
static char (*saved_key)[PLAINTEXT_LENGTH+1];
static uint32_t (*crypt_out)[(DIGEST_SIZE + 3) / sizeof(uint32_t)];
static unsigned char saved_salt[SALT_SIZE];
/* Dirty flags: crypt_all() only recomputes the parts of digest34 that
 * changed since the previous call. */
static int keys_changed, salt_changed;
/* Two-character uppercase hex encoding for each byte value 0x00..0xFF. */
static const char hex_table[][2] = {
	"00", "01", "02", "03", "04", "05", "06", "07",
	"08", "09", "0A", "0B", "0C", "0D", "0E", "0F",
	"10", "11", "12", "13", "14", "15", "16", "17",
	"18", "19", "1A", "1B", "1C", "1D", "1E", "1F",
	"20", "21", "22", "23", "24", "25", "26", "27",
	"28", "29", "2A", "2B", "2C", "2D", "2E", "2F",
	"30", "31", "32", "33", "34", "35", "36", "37",
	"38", "39", "3A", "3B", "3C", "3D", "3E", "3F",
	"40", "41", "42", "43", "44", "45", "46", "47",
	"48", "49", "4A", "4B", "4C", "4D", "4E", "4F",
	"50", "51", "52", "53", "54", "55", "56", "57",
	"58", "59", "5A", "5B", "5C", "5D", "5E", "5F",
	"60", "61", "62", "63", "64", "65", "66", "67",
	"68", "69", "6A", "6B", "6C", "6D", "6E", "6F",
	"70", "71", "72", "73", "74", "75", "76", "77",
	"78", "79", "7A", "7B", "7C", "7D", "7E", "7F",
	"80", "81", "82", "83", "84", "85", "86", "87",
	"88", "89", "8A", "8B", "8C", "8D", "8E", "8F",
	"90", "91", "92", "93", "94", "95", "96", "97",
	"98", "99", "9A", "9B", "9C", "9D", "9E", "9F",
	"A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7",
	"A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF",
	"B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7",
	"B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF",
	"C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7",
	"C8", "C9", "CA", "CB", "CC", "CD", "CE", "CF",
	"D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7",
	"D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF",
	"E0", "E1", "E2", "E3", "E4", "E5", "E6", "E7",
	"E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF",
	"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7",
	"F8", "F9", "FA", "FB", "FC", "FD", "FE", "FF"
};
/* 256-entry byte permutation used by the digest rounds, with its first 48
 * entries repeated at the end so an index of the form j + t (j <= 48,
 * t <= 255) never leaves the table. */
static const unsigned char lotus_magic_table[] = {
	0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
	0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
	0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
	0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
	0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
	0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36,
	0x3e, 0xee, 0xfb, 0x95, 0x1a, 0xfe, 0xce, 0xa8,
	0x34, 0xa9, 0x13, 0xf0, 0xa6, 0x3f, 0xd8, 0x0c,
	0x78, 0x24, 0xaf, 0x23, 0x52, 0xc1, 0x67, 0x17,
	0xf5, 0x66, 0x90, 0xe7, 0xe8, 0x07, 0xb8, 0x60,
	0x48, 0xe6, 0x1e, 0x53, 0xf3, 0x92, 0xa4, 0x72,
	0x8c, 0x08, 0x15, 0x6e, 0x86, 0x00, 0x84, 0xfa,
	0xf4, 0x7f, 0x8a, 0x42, 0x19, 0xf6, 0xdb, 0xcd,
	0x14, 0x8d, 0x50, 0x12, 0xba, 0x3c, 0x06, 0x4e,
	0xec, 0xb3, 0x35, 0x11, 0xa1, 0x88, 0x8e, 0x2b,
	0x94, 0x99, 0xb7, 0x71, 0x74, 0xd3, 0xe4, 0xbf,
	0x3a, 0xde, 0x96, 0x0e, 0xbc, 0x0a, 0xed, 0x77,
	0xfc, 0x37, 0x6b, 0x03, 0x79, 0x89, 0x62, 0xc6,
	0xd7, 0xc0, 0xd2, 0x7c, 0x6a, 0x8b, 0x22, 0xa3,
	0x5b, 0x05, 0x5d, 0x02, 0x75, 0xd5, 0x61, 0xe3,
	0x18, 0x8f, 0x55, 0x51, 0xad, 0x1f, 0x0b, 0x5e,
	0x85, 0xe5, 0xc2, 0x57, 0x63, 0xca, 0x3d, 0x6c,
	0xb4, 0xc5, 0xcc, 0x70, 0xb2, 0x91, 0x59, 0x0d,
	0x47, 0x20, 0xc8, 0x4f, 0x58, 0xe0, 0x01, 0xe2,
	0x16, 0x38, 0xc4, 0x6f, 0x3b, 0x0f, 0x65, 0x46,
	0xbe, 0x7e, 0x2d, 0x7b, 0x82, 0xf9, 0x40, 0xb5,
	0x1d, 0x73, 0xf8, 0xeb, 0x26, 0xc7, 0x87, 0x97,
	0x25, 0x54, 0xb1, 0x28, 0xaa, 0x98, 0x9d, 0xa5,
	0x64, 0x6d, 0x7a, 0xd4, 0x10, 0x81, 0x44, 0xef,
	0x49, 0xd6, 0xae, 0x2e, 0xdd, 0x76, 0x5c, 0x2f,
	0xa7, 0x1c, 0xc9, 0x09, 0x69, 0x9a, 0x83, 0xcf,
	0x29, 0x39, 0xb9, 0xe9, 0x4c, 0xff, 0x43, 0xab,
	/* double power! (repeat of the first 48 entries, see above) */
	0xbd, 0x56, 0xea, 0xf2, 0xa2, 0xf1, 0xac, 0x2a,
	0xb0, 0x93, 0xd1, 0x9c, 0x1b, 0x33, 0xfd, 0xd0,
	0x30, 0x04, 0xb6, 0xdc, 0x7d, 0xdf, 0x32, 0x4b,
	0xf7, 0xcb, 0x45, 0x9b, 0x31, 0xbb, 0x21, 0x5a,
	0x41, 0x9f, 0xe1, 0xd9, 0x4a, 0x4d, 0x9e, 0xda,
	0xa0, 0x68, 0x2c, 0xc3, 0x27, 0x5f, 0x80, 0x36
};
/* Known ciphertext/plaintext pairs exercised by the self test. */
static struct fmt_tests tests[] = {
	{"(GVMroLzc50YK/Yd+L8KH)", ""},
	{"(GqnUDNNGNUz5HRoelmLU)", "x"},
	{"(GNBpcGJRYpBe9orUOpmZ)", "dupaaa123"},
	{"(G0xjUQzdKxvHpUYqo5hU)", "koziolekmatolek"},
	{"(G+dfECo845XxUw+nFVYD)", "szesnascieznakow"},
	{"(GowT5I2hVHZpRWpvGmux)", "terazjakiesdwadziesciacos"},
	{"(Gq2bAtpguiTSSycy6dhu)", "trzydziescidwamozesieudaojnieuda"},
	{"(G82TtgNcqcHGkpEo7wQp)", "looongrandominputdataforfunbutnotonlyoi!"},
	{NULL}
};
/* One-time format setup: scale the key batch size for OpenMP and allocate
 * the per-candidate buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* The minimum batch grows with the thread count; the maximum batch
	 * grows by a further OMP_SCALE factor to amortize thread overhead. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	digest34 = mem_calloc(self->params.max_keys_per_crypt,
	                      sizeof(*digest34));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	/* Nothing has been hashed yet. */
	keys_changed = salt_changed = 0;
}
/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
	MEM_FREE(digest34);
}
/* Scratch target for decode(): its 15 output bytes land here and split
 * into the 5-byte salt followed by the binary hash buffer. */
static struct {
	unsigned char salt[SALT_SIZE];
	unsigned char hash[BINARY_BUFFER_SIZE];
} cipher_binary_struct;
/*
 * One compression round of the Lotus/Domino digest, single lane: mixes a
 * 16-byte block into the 16-byte state through a 48-byte work buffer
 * driven by lotus_magic_table.  Does NOT update the running checksum
 * (hence "norecalc"); mdtransform_1() adds that step.
 */
static void mdtransform_norecalc_1(unsigned char state[16], unsigned char block[16])
{
	union {
		unsigned char c[48];
#ifdef DOMINOSEC_32BIT
		uint32_t u32[12]; /* word-at-a-time view for the 32-bit variant */
#endif
	} x;
	unsigned char *p;
	unsigned int i, j, t;
	/* Fill the work buffer: state bytes, then block bytes, then
	 * state XOR block, each whitened through the magic table. */
	t = 0; p = x.c;
	for (j = 48; j > 32; j--) {
		t = state[p - x.c] ^ lotus_magic_table[j + t];
		*p++ = t;
	}
	for (; j > 16; j--) {
		t = block[p - x.c - 16] ^ lotus_magic_table[j + t];
		*p++ = t;
	}
	for (; j > 0; j--) {
		t = state[p - x.c - 32] ^ block[p - x.c - 32] ^ lotus_magic_table[j + t];
		*p++ = t;
	}
#ifndef DOMINOSEC_32BIT
	/* 16 diffusion passes over the 48-byte buffer.  The inner loop body
	 * is hand-unrolled 12x; note j is decremented inside the expressions
	 * as well as by the loop header (12 decrements per iteration). */
	for (i = 0; i < 16; i++) {
		p = x.c;
		for (j = 48; j > 0; j--) {
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j-- + t];
			t = *p++ ^= lotus_magic_table[j + t];
		}
	}
#else
	/* Same diffusion, reading the buffer one 32-bit word at a time. */
	for (i = 0; i < 16; i++) {
		uint32_t *q = x.u32;
		p = x.c;
		for (j = 48; j > 0; j--) {
			uint32_t u = *q++;
			t = *p++ = u ^ lotus_magic_table[j-- + t];
			t = *p++ = (u >> 8) ^ lotus_magic_table[j-- + t];
			u >>= 16;
			t = *p++ = u ^ lotus_magic_table[j-- + t];
			t = *p++ = (u >> 8) ^ lotus_magic_table[j + t];
		}
	}
#endif
	/* Fold the first 16 bytes of the work buffer back into the state. */
	p = x.c;
	for (j = 48; j > 32; j--) {
		state[p - x.c] = t = *p ^ lotus_magic_table[j + t];
		p++;
	}
}
/* Full single-lane digest step: run the compression round on one 16-byte
 * block and fold the block into the running checksum. */
static void mdtransform_1(unsigned char state[16],
	unsigned char checksum[16], unsigned char block[16])
{
	unsigned int idx, carry;
	mdtransform_norecalc_1(state, block);
	/* The checksum chains through the magic table, seeded by its own
	 * last byte. */
	carry = checksum[15];
	for (idx = 0; idx < 16; idx++) {
		checksum[idx] ^= lotus_magic_table[block[idx] ^ carry];
		carry = checksum[idx];
	}
}
/*
 * Three-lane version of mdtransform_norecalc_1(): advances three
 * independent digests in lockstep (better instruction-level parallelism).
 * Each lane performs exactly the single-lane computation on its own
 * state/block pair; the checksum update is again left to the caller.
 */
static void mdtransform_norecalc_3(unsigned char state[3][16],
	unsigned char block0[16],
	unsigned char block1[16],
	unsigned char block2[16])
{
	union {
		unsigned char c[48];
#ifdef DOMINOSEC_32BIT
		uint32_t u32[12]; /* word-at-a-time view for the 32-bit variant */
#endif
	} x[3];
	unsigned char *p0, *p1, *p2;
	unsigned int i, j, t0, t1, t2;
	/* Fill each lane's 48-byte work buffer: state, block, state XOR
	 * block, whitened through the magic table. */
	t0 = t1 = t2 = 0;
	p0 = x[0].c;
	p1 = x[1].c;
	p2 = x[2].c;
	for (j = 48; j > 32; j--) {
		t0 = state[0][p0 - x[0].c] ^ lotus_magic_table[j + t0];
		t1 = state[1][p1 - x[1].c] ^ lotus_magic_table[j + t1];
		t2 = state[2][p2 - x[2].c] ^ lotus_magic_table[j + t2];
		*p0++ = t0;
		*p1++ = t1;
		*p2++ = t2;
	}
	for (; j > 16; j--) {
		t0 = block0[p0 - x[0].c - 16] ^ lotus_magic_table[j + t0];
		t1 = block1[p1 - x[1].c - 16] ^ lotus_magic_table[j + t1];
		t2 = block2[p2 - x[2].c - 16] ^ lotus_magic_table[j + t2];
		*p0++ = t0;
		*p1++ = t1;
		*p2++ = t2;
	}
	for (; j > 0; j--) {
		t0 = state[0][p0 - x[0].c - 32] ^ block0[p0 - x[0].c - 32] ^ lotus_magic_table[j + t0];
		t1 = state[1][p1 - x[1].c - 32] ^ block1[p1 - x[1].c - 32] ^ lotus_magic_table[j + t1];
		t2 = state[2][p2 - x[2].c - 32] ^ block2[p2 - x[2].c - 32] ^ lotus_magic_table[j + t2];
		*p0++ = t0;
		*p1++ = t1;
		*p2++ = t2;
	}
#ifndef DOMINOSEC_32BIT
	/* 16 diffusion passes, unrolled 4x per lane; j decrements once per
	 * interleaved group of three lane updates. */
	for (i = 0; i < 16; i++) {
		p0 = x[0].c;
		p1 = x[1].c;
		p2 = x[2].c;
		for (j = 48; j > 0; j--) {
			t0 = *p0++ ^= lotus_magic_table[j + t0];
			t1 = *p1++ ^= lotus_magic_table[j + t1];
			t2 = *p2++ ^= lotus_magic_table[j-- + t2];
			t0 = *p0++ ^= lotus_magic_table[j + t0];
			t1 = *p1++ ^= lotus_magic_table[j + t1];
			t2 = *p2++ ^= lotus_magic_table[j-- + t2];
			t0 = *p0++ ^= lotus_magic_table[j + t0];
			t1 = *p1++ ^= lotus_magic_table[j + t1];
			t2 = *p2++ ^= lotus_magic_table[j-- + t2];
			t0 = *p0++ ^= lotus_magic_table[j + t0];
			t1 = *p1++ ^= lotus_magic_table[j + t1];
			t2 = *p2++ ^= lotus_magic_table[j + t2];
		}
	}
#else
	/* Same diffusion, reading the buffers one 32-bit word at a time. */
	for (i = 0; i < 16; i++) {
		uint32_t *q0 = x[0].u32;
		uint32_t *q1 = x[1].u32;
		uint32_t *q2 = x[2].u32;
		p0 = x[0].c;
		p1 = x[1].c;
		p2 = x[2].c;
		for (j = 48; j > 0; j--) {
			uint32_t u0 = *q0++;
			uint32_t u1 = *q1++;
			uint32_t u2 = *q2++;
			t0 = *p0++ = u0 ^ lotus_magic_table[j + t0];
			t1 = *p1++ = u1 ^ lotus_magic_table[j + t1];
			t2 = *p2++ = u2 ^ lotus_magic_table[j-- + t2];
			t0 = *p0++ = (u0 >> 8) ^ lotus_magic_table[j + t0];
			t1 = *p1++ = (u1 >> 8) ^ lotus_magic_table[j + t1];
			t2 = *p2++ = (u2 >> 8) ^ lotus_magic_table[j-- + t2];
			u0 >>= 16;
			u1 >>= 16;
			u2 >>= 16;
			t0 = *p0++ = u0 ^ lotus_magic_table[j + t0];
			t1 = *p1++ = u1 ^ lotus_magic_table[j + t1];
			t2 = *p2++ = u2 ^ lotus_magic_table[j-- + t2];
			t0 = *p0++ = (u0 >> 8) ^ lotus_magic_table[j + t0];
			t1 = *p1++ = (u1 >> 8) ^ lotus_magic_table[j + t1];
			t2 = *p2++ = (u2 >> 8) ^ lotus_magic_table[j + t2];
		}
	}
#endif
	/* Fold the first 16 bytes of each work buffer back into its state. */
	p0 = x[0].c;
	p1 = x[1].c;
	p2 = x[2].c;
	for (j = 48; j > 32; j--) {
		state[0][p0 - x[0].c] = t0 = *p0 ^ lotus_magic_table[j + t0];
		state[1][p1 - x[1].c] = t1 = *p1 ^ lotus_magic_table[j + t1];
		state[2][p2 - x[2].c] = t2 = *p2 ^ lotus_magic_table[j + t2];
		p0++;
		p1++;
		p2++;
	}
}
/* Full three-lane digest step: one compression round per lane plus the
 * checksum fold, mirroring mdtransform_1() across three inputs. */
static void mdtransform_3(unsigned char state[3][16],
	unsigned char checksum[3][16],
	unsigned char block0[16],
	unsigned char block1[16],
	unsigned char block2[16])
{
	unsigned int idx, c0, c1, c2;
	mdtransform_norecalc_3(state, block0, block1, block2);
	/* Each lane's checksum chains from its own last byte. */
	c0 = checksum[0][15];
	c1 = checksum[1][15];
	c2 = checksum[2][15];
	for (idx = 0; idx < 16; idx++) {
		c0 = checksum[0][idx] ^= lotus_magic_table[block0[idx] ^ c0];
		c1 = checksum[1][idx] ^= lotus_magic_table[block1[idx] ^ c1];
		c2 = checksum[2][idx] ^= lotus_magic_table[block2[idx] ^ c2];
	}
}
#if 0
/* Single-lane reference implementation of the full digest, kept disabled
 * for documentation; crypt_all() only uses the 3-lane variants below. */
static void domino_big_md_1(unsigned char *in, unsigned int size, unsigned char *out)
{
	unsigned char state[16] = {0};
	unsigned char checksum[16] = {0};
	unsigned char block[16];
	unsigned int curpos = 0;
	/* Whole 16-byte blocks. */
	while (curpos + 15 < size) {
		mdtransform_1(state, checksum, in + curpos);
		curpos += 16;
	}
	/* Final partial block, padded with the pad length (PKCS#7 style). */
	{
		unsigned int pad = size - curpos;
		memcpy(block, in + curpos, pad);
		memset(block + pad, 16 - pad, 16 - pad);
		mdtransform_1(state, checksum, block);
	}
	/* Last round mixes in the checksum; the state is the digest. */
	mdtransform_norecalc_1(state, checksum);
	memcpy(out, state, 16);
}
#endif
/*
 * Digest three inputs of possibly different lengths.  The shared prefix
 * (up to the shortest length, rounded down to whole blocks) is processed
 * three lanes at a time; each lane's remaining whole blocks go through
 * the single-lane transform; then all three padded tail blocks and the
 * final checksum round are done in lockstep again.
 */
static void domino_big_md_3(unsigned char *in0, unsigned int size0,
	unsigned char *in1, unsigned int size1,
	unsigned char *in2, unsigned int size2,
	unsigned char *out0, unsigned char *out1, unsigned char *out2)
{
	unsigned char state[3][16] = {{0}, {0}, {0}};
	unsigned char checksum[3][16] = {{0}, {0}, {0}};
	unsigned char block[3][16];
	unsigned int min, curpos = 0, curpos0, curpos1, curpos2;
	/* min = shortest of the three input lengths. */
	min = (size0 < size1) ? size0 : size1;
	if (size2 < min)
		min = size2;
	/* Common whole blocks: all three lanes together. */
	while (curpos + 15 < min) {
		mdtransform_3(state, checksum,
			in0 + curpos, in1 + curpos, in2 + curpos);
		curpos += 16;
	}
	/* Per-lane leftover whole blocks. */
	curpos0 = curpos;
	while (curpos0 + 15 < size0) {
		mdtransform_1(state[0], checksum[0], in0 + curpos0);
		curpos0 += 16;
	}
	curpos1 = curpos;
	while (curpos1 + 15 < size1) {
		mdtransform_1(state[1], checksum[1], in1 + curpos1);
		curpos1 += 16;
	}
	curpos2 = curpos;
	while (curpos2 + 15 < size2) {
		mdtransform_1(state[2], checksum[2], in2 + curpos2);
		curpos2 += 16;
	}
	/* Padded tail blocks (pad byte == pad length, PKCS#7 style). */
	{
		unsigned int pad0 = size0 - curpos0;
		unsigned int pad1 = size1 - curpos1;
		unsigned int pad2 = size2 - curpos2;
		memcpy(block[0], in0 + curpos0, pad0);
		memcpy(block[1], in1 + curpos1, pad1);
		memcpy(block[2], in2 + curpos2, pad2);
		memset(block[0] + pad0, 16 - pad0, 16 - pad0);
		memset(block[1] + pad1, 16 - pad1, 16 - pad1);
		memset(block[2] + pad2, 16 - pad2, 16 - pad2);
		mdtransform_3(state, checksum, block[0], block[1], block[2]);
	}
	/* Final round folds each checksum in; the states are the digests. */
	mdtransform_norecalc_3(state, checksum[0], checksum[1], checksum[2]);
	memcpy(out0, state[0], 16);
	memcpy(out1, state[1], 16);
	memcpy(out2, state[2], 16);
}
/* Specialization of domino_big_md_3() for the fixed 34-byte salted-hex
 * buffers: two full 16-byte blocks plus a 2-byte tail padded with the
 * value 14 (the pad length, PKCS#7 style). */
static void domino_big_md_3_34(unsigned char *in0,
	unsigned char *in1,
	unsigned char *in2,
	unsigned char *out0,
	unsigned char *out1,
	unsigned char *out2)
{
	unsigned char state[3][16] = {{0}, {0}, {0}};
	unsigned char checksum[3][16] = {{0}, {0}, {0}};
	unsigned char tail[3][16];
	unsigned char *in[3];
	int lane;
	/* Two complete blocks per lane. */
	mdtransform_3(state, checksum, in0, in1, in2);
	mdtransform_3(state, checksum, in0 + 16, in1 + 16, in2 + 16);
	/* Remaining 2 bytes of each input, padded to a full block. */
	in[0] = in0;
	in[1] = in1;
	in[2] = in2;
	for (lane = 0; lane < 3; lane++) {
		memcpy(tail[lane], in[lane] + 32, 2);
		memset(tail[lane] + 2, 14, 14);
	}
	mdtransform_3(state, checksum, tail[0], tail[1], tail[2]);
	/* Final round folds the checksums in; the states are the digests. */
	mdtransform_norecalc_3(state, checksum[0], checksum[1], checksum[2]);
	memcpy(out0, state[0], 16);
	memcpy(out1, state[1], 16);
	memcpy(out2, state[2], 16);
}
/* Syntactic check of one ciphertext: exactly CIPHERTEXT_LENGTH characters
 * shaped as "(G" + base64-ish payload + ")". */
static int valid(char *ciphertext, struct fmt_main *self)
{
	unsigned int pos;
	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;
	if (ciphertext[0] != '(' || ciphertext[1] != 'G')
		return 0;
	if (ciphertext[CIPHERTEXT_LENGTH-1] != ')')
		return 0;
	/* Payload alphabet: alphanumerics plus '+' and '/'. */
	for (pos = 1; pos < CIPHERTEXT_LENGTH-1; ++pos) {
		unsigned char ch = ciphertext[pos];
		if (!isalnum(ch) && ch != '+' && ch != '/')
			return 0;
	}
	return 1;
}
/*
static unsigned int proper_mul(int delta_apsik)
{
__asm__("movl $0xAAAAAAAB, %eax \n"
"movl 0x8(%ebp), %edx \n"
"mul %edx \n"
"shr $0x2,%edx \n"
"movl %edx, %eax \n");
}
*/
static void decode(unsigned char *ascii_cipher, unsigned char *binary)
{
unsigned int out = 0, apsik = 0, loop;
unsigned int i;
unsigned char ch;
ascii_cipher += 2;
i = 0;
do {
if (apsik < 8) {
/* should be using proper_mul, but what the heck...
it's nearly the same :] */
loop = 2; /* ~ loop = proper_mul(13 - apsik); */
apsik += loop*6;
do {
out <<= 6;
ch = *ascii_cipher;
if (ch < '0' || ch > '9')
if (ch < 'A' || ch > 'Z')
if (ch < 'a' || ch > 'z')
if (ch != '+')
if (ch == '/')
out += '?';
else
{ ; } /* shit happens */
else
out += '>';
else
out += ch-'=';
else
out += ch-'7';
else
out += ch-'0';
++ascii_cipher;
} while (--loop);
}
loop = apsik-8;
ch = out >> loop;
*(binary+i) = ch;
ch <<= loop;
apsik = loop;
out -= ch;
} while (++i < 15);
binary[3] += -4;
}
/* Decode a ciphertext and return its hash part in a static, word-aligned
 * buffer (rounded up to a whole number of uint32_t words). */
static void *get_binary(char *ciphertext)
{
	static uint32_t binary_out[BINARY_SIZE / sizeof(uint32_t) + 1];
	decode((unsigned char *)ciphertext,
	       (unsigned char *)&cipher_binary_struct);
	memcpy(binary_out, cipher_binary_struct.hash, BINARY_SIZE);
	return binary_out;
}
/* Decode a ciphertext and return its 5-byte salt in a static, word-aligned
 * buffer. */
static void *get_salt(char *ciphertext)
{
	static uint32_t salt_out[SALT_SIZE / sizeof(uint32_t) + 1];
	decode((unsigned char *)ciphertext,
	       (unsigned char *)&cipher_binary_struct);
	memcpy(salt_out, cipher_binary_struct.salt, SALT_SIZE);
	return salt_out;
}
/* Install the salt for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	salt_changed = 1; /* crypt_all must refresh the salted buffers */
	memcpy(saved_salt, salt, SALT_SIZE);
}
/* Store one candidate password (NUL-terminated, truncated to the limit). */
static void set_key(char *key, int index)
{
	keys_changed = 1; /* crypt_all must recompute the key digests */
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}
/* Return the stored candidate password at the given index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
 * Hash all pending candidates.  Keys are processed three at a time through
 * the 3-lane digest routines (MIN_KEYS_PER_CRYPT == 3 keeps count a
 * multiple of 3).  The keys_changed / salt_changed flags limit work to
 * the parts of digest34 that actually went stale since the last call.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
#ifdef _OPENMP
	/* Triples are independent; flags are only read inside the loop. */
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += 3) {
		int i, j;
		if (keys_changed) {
			/* Step 1: digest the three plaintexts, then write the
			 * first 14 digest bytes as hex into bytes 6..33 of each
			 * candidate's digest34 buffer. */
			char *k0 = saved_key[index];
			char *k1 = saved_key[index + 1];
			char *k2 = saved_key[index + 2];
			unsigned char digest16[3][16];
			domino_big_md_3((unsigned char *)k0, strlen(k0),
			                (unsigned char *)k1, strlen(k1),
			                (unsigned char *)k2, strlen(k2),
			                digest16[0], digest16[1], digest16[2]);
			/* Not (++i < 16) !
			 * Domino will do hash of first 34 bytes ignoring The Fact that now
			 * there is a salt at a beginning of buffer. This means that last 5
			 * bytes "EEFF)" of password digest are meaningless.
			 */
			for (i = 0, j = 6; i < 14; i++, j += 2) {
				const char *hex2 = hex_table[ARCH_INDEX(digest16[0][i])];
				digest34[index][j] = hex2[0];
				digest34[index][j + 1] = hex2[1];
				hex2 = hex_table[ARCH_INDEX(digest16[1][i])];
				digest34[index + 1][j] = hex2[0];
				digest34[index + 1][j + 1] = hex2[1];
				hex2 = hex_table[ARCH_INDEX(digest16[2][i])];
				digest34[index + 2][j] = hex2[0];
				digest34[index + 2][j + 1] = hex2[1];
			}
		}
		if (salt_changed) {
			/* Step 2: prefix each buffer with the 5 salt bytes and the
			 * '(' that precedes the hex digest in Domino's format. */
			digest34[index + 2][0] = digest34[index + 1][0] =
				digest34[index][0] = saved_salt[0];
			digest34[index + 2][1] = digest34[index + 1][1] =
				digest34[index][1] = saved_salt[1];
			digest34[index + 2][2] = digest34[index + 1][2] =
				digest34[index][2] = saved_salt[2];
			digest34[index + 2][3] = digest34[index + 1][3] =
				digest34[index][3] = saved_salt[3];
			digest34[index + 2][4] = digest34[index + 1][4] =
				digest34[index][4] = saved_salt[4];
			digest34[index + 2][5] = digest34[index + 1][5] =
				digest34[index][5] = '(';
		}
		/* Step 3: digest the 34-byte salted buffers into crypt_out. */
		domino_big_md_3_34(digest34[index], digest34[index + 1],
		                   digest34[index + 2],
		                   (unsigned char *)crypt_out[index],
		                   (unsigned char *)crypt_out[index + 1],
		                   (unsigned char *)crypt_out[index + 2]);
	}
	/* Everything is up to date until the next set_key/set_salt. */
	keys_changed = salt_changed = 0;
	return count;
}
/*
 * Quick scan over all computed digests.
 * Only 10 bytes of digest are to be checked; 48 bits are left alone.
 * This fast path compares just the first machine word (ARCH_SIZE bytes);
 * cmp_one() re-checks BINARY_SIZE bytes on a hit.
 */
static int cmp_all(void *binary, int count)
{
	int i;
	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], ARCH_SIZE) == 0)
			return 1;
	}
	return 0;
}
/* Full BINARY_SIZE comparison against one candidate's digest. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* cmp_one() already compares every byte this format stores, so there is
 * nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
	(void)source;
	(void)index;
	return 1;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/* Bucket a salt by the low bits of its first 32-bit word. */
static int salt_hash(void *salt)
{
	const uint32_t word = *(uint32_t *)salt;
	return word & (SALT_HASH_SIZE - 1);
}
/* Registration record consumed by the John the Ripper core: the first
   sub-struct holds the format's static parameters, the second its method
   table (the callbacks implemented earlier in this file, with library
   defaults where no format-specific behavior is needed). */
struct fmt_main fmt_DOMINOSEC = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ NULL },
		tests
	},
	{
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			/* binary_hash[0..6]: default helpers over the raw binary */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			/* get_hash[0..6] generated from crypt_out by the shared header */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
helpers.c | /*******************************************************************************
Collective Matrix Factorization
-------------------------------
This is a module for multi-way factorization of sparse and dense matrices
intended to be used for recommender system with explicit feedback data plus
side information about users and/or items.
The reference papers are:
(a) Cortes, David.
"Cold-start recommendations in Collective Matrix Factorization."
arXiv preprint arXiv:1809.00366 (2018).
(b) Singh, Ajit P., and Geoffrey J. Gordon.
"Relational learning via collective matrix factorization."
Proceedings of the 14th ACM SIGKDD international conference on
Knowledge discovery and data mining. 2008.
(c) Hu, Yifan, Yehuda Koren, and Chris Volinsky.
"Collaborative filtering for implicit feedback datasets."
2008 Eighth IEEE International Conference on Data Mining.
Ieee, 2008.
(d) Takacs, Gabor, Istvan Pilaszy, and Domonkos Tikk.
"Applications of the conjugate gradient method for
implicit feedback collaborative filtering."
Proceedings of the fifth ACM conference on
Recommender systems. 2011.
(e) Rendle, Steffen, Li Zhang, and Yehuda Koren.
"On the difficulty of evaluating baselines:
A study on recommender systems."
arXiv preprint arXiv:1905.01395 (2019).
(f) Franc, Vojtech, Vaclav Hlavac, and Mirko Navara.
"Sequential coordinate-wise algorithm for the
non-negative least squares problem."
International Conference on Computer Analysis of Images
and Patterns. Springer, Berlin, Heidelberg, 2005.
(g) Zhou, Yunhong, et al.
"Large-scale parallel collaborative filtering for
the netflix prize."
International conference on algorithmic applications in management.
Springer, Berlin, Heidelberg, 2008.
For information about the models offered here and how they are fit to
the data, see the files 'collective.c' and 'offsets.c'.
Written for C99 standard and OpenMP version 2.0 or higher, and aimed to be
used either as a stand-alone program, or wrapped into scripting languages
such as Python and R.
<https://www.github.com/david-cortes/cmfrec>
MIT License:
Copyright (c) 2020-2021 David Cortes
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*******************************************************************************/
#include "cmfrec.h"
/* Note: in x86_64 computers, there's hardly any speed up from having > 2
threads zeroing out an array */
/* Zero out arr[0..n). Caps itself at 2 OpenMP threads (memset gains little
   from more, per the note above) and only parallelizes for n > 1e8:
   one equal chunk per thread, leftover elements zeroed serially. */
void set_to_zero_(real_t *arr, size_t n, int nthreads)
{
    if (n == 0) return;
    #if defined(_OPENMP)
    nthreads = (nthreads > 1)? 2 : 1;
    size_t chunk_size = n / (size_t)nthreads;
    size_t remainder = n % (size_t)nthreads;
    int_t i = 0;
    if (nthreads > 1 && n > (size_t)1e8)
    {
        #pragma omp parallel for schedule(static, 1) \
                firstprivate(arr, chunk_size, nthreads) num_threads(nthreads)
        for (i = 0; i < nthreads; i++)
            memset(arr + i * chunk_size, 0, chunk_size*sizeof(real_t));
        if (remainder > 0)
            memset(arr + nthreads * chunk_size, 0, remainder*sizeof(real_t));
    } else
    #endif
    {
        memset(arr, 0, n*sizeof(real_t));
    }
}
/* Note: in x86_64 computers, there's hardly any speed up from having > 4
threads copying arrays */
/* Copy src[0..n) into dest. Splits into up to 4 memcpy chunks only when
   n > 1e8 (see note above about diminishing returns). */
void copy_arr_(real_t *restrict src, real_t *restrict dest, size_t n, int nthreads)
{
    /* Note: don't use BLAS scopy as it's actually much slower */
    if (n == 0) return;
    #if defined(_OPENMP)
    if (nthreads > 1 && n > (size_t)1e8)
    {
        nthreads = cap_to_4(nthreads);
        size_t chunk_size = n / (size_t)nthreads;
        size_t remainder = n % (size_t)nthreads;
        int_t i = 0;
        #pragma omp parallel for schedule(static, 1) \
                firstprivate(src, dest, chunk_size, nthreads) num_threads(nthreads)
        for (i = 0; i < nthreads; i++)
            memcpy(dest + i * chunk_size, src + i * chunk_size, chunk_size*sizeof(real_t));
        /* Tail not covered by the equal chunks is copied serially. */
        if (remainder > 0)
            memcpy(dest + nthreads*chunk_size, src + nthreads*chunk_size, remainder*sizeof(real_t));
    } else
    #endif
    {
        memcpy(dest, src, n*sizeof(real_t));
    }
}
/* Note: the C99 standard only guarantees that isnan(NAN)!=0, and some compilers
   like mingw64 will NOT make isnan(NAN)==1. */
/* Count NaN entries in arr[0..n), in parallel. The int_t accumulator is
   clamped to INT_MAX if it overflowed (detected by it going negative). */
int_t count_NAs(real_t arr[], size_t n, int nthreads)
{
    int_t cnt_NA = 0;
    nthreads = cap_to_4(nthreads);
    /* On OpenMP < 3.0 or MSVC the loop index must be a signed type declared
       here; 'size_t_for' presumably expands to the declaration otherwise --
       TODO confirm against cmfrec.h. */
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, n) reduction(+:cnt_NA)
    for (size_t_for ix = 0; ix < n; ix++)
        cnt_NA += isnan(arr[ix]) != 0;
    if (cnt_NA < 0) cnt_NA = INT_MAX; /* <- overflow */
    return cnt_NA;
}
/* Per-row NaN counts for a dense row-major m-x-n matrix.
   Outputs:
     cnt_NA[row]  - number of NaNs in that row
     *full_dense  - true iff no row has any NaN
     *near_dense  - true iff >= 75% of rows have no NaN
     *some_full   - true iff at least one row has no NaN */
void count_NAs_by_row
(
    real_t *restrict arr, int_t m, int_t n,
    int_t *restrict cnt_NA, int nthreads,
    bool *restrict full_dense, bool *restrict near_dense,
    bool *restrict some_full
)
{
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(m, n, arr, cnt_NA)
    for (size_t_for row = 0; row < (size_t)m; row++)
    {
        int_t cnt = 0;
        for (size_t col = 0; col < (size_t)n; col++)
            cnt += isnan(arr[col + row*n]) != 0;
        cnt_NA[row] = cnt;
    }
    *full_dense = true;
    for (int_t ix = 0; ix < m; ix++) {
        if (cnt_NA[ix]) {
            *full_dense = false;
            break;
        }
    }
    /* Will be considered near-dense if at least 75% of the rows have
       no missing values.
       This is used later in order to decide whether to use a gradient-
       based approach or closed-form when optimizing a matrix in isolation */
    *near_dense = false;
    int_t cnt_rows_w_NA = 0;
    if (!(*full_dense))
    {
        for (int_t ix = 0; ix < m; ix++)
            cnt_rows_w_NA += (cnt_NA[ix] > 0);
        if ((m - cnt_rows_w_NA) >= (int)(0.75 * (double)m))
            *near_dense = true;
    }
    *some_full = *full_dense;
    if (!(*full_dense))
    {
        for (int_t ix = 0; ix < m; ix++)
        {
            if (cnt_NA[ix] == 0) {
                *some_full = true;
                break;
            }
        }
    }
}
/* Per-column NaN counts for a dense row-major m-x-n matrix; flags have the
   same meaning as in count_NAs_by_row, but over columns.
   NOTE(review): cnt_NA is accumulated with '+=' and never reset here --
   assumes the caller passes it zero-initialized; verify at call sites. */
void count_NAs_by_col
(
    real_t *restrict arr, int_t m, int_t n,
    int_t *restrict cnt_NA,
    bool *restrict full_dense, bool *restrict near_dense,
    bool *restrict some_full
)
{
    for (size_t row = 0; row < (size_t)m; row++)
        for (size_t col = 0; col < (size_t)n; col++)
            cnt_NA[col] += isnan(arr[col + row*n]) != 0;
    *full_dense = true;
    for (int_t ix = 0; ix < n; ix++) {
        if (cnt_NA[ix]) {
            *full_dense = false;
            break;
        }
    }
    /* Near-dense: at least 75% of the columns have no missing values. */
    *near_dense = false;
    int_t cnt_rows_w_NA = 0;
    if (!(*full_dense))
    {
        for (int_t ix = 0; ix < n; ix++)
            cnt_rows_w_NA += (cnt_NA[ix] > 0);
        if ((n - cnt_rows_w_NA) >= (int_t)(0.75 * (real_t)n))
            *near_dense = true;
    }
    *some_full = *full_dense;
    if (!(*full_dense))
    {
        for (int_t ix = 0; ix < n; ix++)
        {
            if (cnt_NA[ix] == 0) {
                *some_full = true;
                break;
            }
        }
    }
}
/* outp[row] <- sum of row 'row' of dense row-major A (m x n); each row is
   accumulated in double before narrowing to real_t. */
void sum_by_rows(real_t *restrict A, real_t *restrict outp, int_t m, int_t n, int nthreads)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(m, n, A, outp)
    for (size_t_for row = 0; row < (size_t)m; row++)
    {
        double rsum = 0;
        for (size_t col = 0; col < (size_t)n; col++)
            rsum += A[col + row*(size_t)n];
        outp[row] = rsum;
    }
}
/* Column sums of A (m x n with row stride lda) into outp.
   NOTE(review): the parallel branch *assigns* outp[col] while the serial
   branch *accumulates* into it -- results only agree if the caller passes
   a zeroed outp; verify before reusing this helper elsewhere. */
void sum_by_cols(real_t *restrict A, real_t *restrict outp, int_t m, int_t n, size_t lda, int nthreads)
{
    #ifdef _OPENMP
    /* Note: GCC and CLANG do a poor optimization when the array to sum has many
       rows and few columns, which is the most common use-case for this */
    if ((real_t)n > 1e3*(real_t)m && nthreads > 4) /* this assumes there's many columns, in which case there's a speedup */
    {
        #if defined(_OPENMP) && \
                    ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                      || defined(_WIN32) || defined(_WIN64) \
                    )
        long long col;
        #endif
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(A, outp, m, n, lda)
        for (size_t_for col = 0; col < (size_t)n; col++)
        {
            double csum = 0;
            for (size_t row = 0; row < (size_t)m; row++)
                csum += A[col + row*lda];
            outp[col] = csum;
        }
    }
    else
    #endif
    {
        for (size_t row = 0; row < (size_t)m; row++)
            for (size_t col = 0; col < (size_t)n; col++)
                outp[col] += A[col + row*lda];
    }
}
/* A[row, col] += b[row] for dense row-major A (m x n); b has length m. */
void mat_plus_rowvec(real_t *restrict A, real_t *restrict b, int_t m, int_t n, int nthreads)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(A, b, m, n)
    for (size_t_for row = 0; row < (size_t)m; row++)
        for (size_t col = 0; col < (size_t)n; col++)
            A[col + (size_t)row*n] += b[row];
}
/* Add the length-n vector b, scaled by alpha, to every row of A
   (m rows, leading dimension lda): A[row, :] += alpha*b. */
void mat_plus_colvec(real_t *restrict A, real_t *restrict b, real_t alpha, int_t m, int_t n, size_t lda, int nthreads)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(m, n, lda, A, b)
    for (size_t_for row = 0; row < (size_t)m; row++)
        cblas_taxpy(n, alpha, b, 1, A + row*lda, 1);
}
/* Subtract the per-row bias b from X, which is given either dense
   (Xfull, row-major m x n) or as COO triplets (ixA/ixB/X with nnz
   entries; only the row indices ixA are used here). */
void mat_minus_rowvec2
(
    real_t *restrict Xfull,
    int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
    real_t *restrict b, int_t m, int_t n, int nthreads
)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row, ix;
    #endif
    if (Xfull != NULL)
    {
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(Xfull, m, n, b)
        for (size_t_for row = 0; row < (size_t)m; row++)
            for (size_t col = 0; col < (size_t)n; col++)
                Xfull[col + row*(size_t)n] -= b[row];
    }
    else
    {
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(X, b, ixA, nnz)
        for (size_t_for ix = 0; ix < nnz; ix++)
            X[ix] -= b[ixA[ix]];
    }
}
/* Subtract the per-column bias b from X (dense or COO; only the column
   indices ixB are used for the sparse case). Note the dense path runs
   serially here; only the sparse path is parallelized. */
void mat_minus_colvec2
(
    real_t *restrict Xfull,
    int_t ixA[], int_t ixB[], real_t *restrict X, size_t nnz,
    real_t *restrict b, int_t m, int_t n, int nthreads
)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    if (Xfull != NULL)
    {
        for (size_t row = 0; row < (size_t)m; row++)
            for (size_t col = 0; col < (size_t)n; col++)
                Xfull[col + row*(size_t)n] -= b[col];
    }
    else
    {
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(X, b, ixA, nnz)
        for (size_t_for ix = 0; ix < nnz; ix++)
            X[ix] -= b[ixB[ix]];
    }
}
/* Zero out arr[ix] wherever the companion array comp[ix] is NaN;
   other entries are left unchanged. */
void nan_to_zero(real_t *restrict arr, real_t *restrict comp, size_t n, int nthreads)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, comp, n)
    for (size_t_for ix = 0; ix < n; ix++)
        arr[ix] = (!isnan(comp[ix]))? arr[ix] : 0;
}
/* arr[ix] <- w[ix]*arr[ix] when arr[ix] is not NaN, else 0.
   NOTE(review): the 'comp' parameter is unused -- the NaN test is on 'arr'
   itself (unlike nan_to_zero above); confirm this is intentional. */
void mult_if_non_nan(real_t *restrict arr, real_t *restrict comp, real_t *restrict w, size_t n, int nthreads)
{
    nthreads = cap_to_4(nthreads);
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, w, n)
    for (size_t_for ix = 0; ix < n; ix++)
        arr[ix] = (!isnan(arr[ix]))? (w[ix] * arr[ix]) : (0);
}
/* Elementwise in-place product: inout[ix] *= other[ix]. */
void mult_elemwise(real_t *restrict inout, real_t *restrict other, size_t n, int nthreads)
{
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(inout, other, n)
    for (size_t_for ix = 0; ix < n; ix++)
        inout[ix] *= other[ix];
}
/* Returns sum(arr[ix]^2). Arrays below BLAS' 32-bit index limit go through
   a dot product with itself; larger arrays use an OpenMP reduction with a
   double accumulator ('res' is only used on that path). */
real_t sum_squares(real_t *restrict arr, size_t n, int nthreads)
{
    double res = 0;
    if (n < (size_t)INT_MAX)
        return cblas_tdot((int)n, arr, 1, arr, 1);
    else {
        nthreads = cap_to_4(nthreads);
        #if defined(_OPENMP) && \
                    ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                      || defined(_WIN32) || defined(_WIN64) \
                    )
        long long ix;
        #endif
        #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, n) reduction(+:res)
        for (size_t_for ix = 0; ix < n; ix++)
            res += square(arr[ix]);
    }
    return (real_t)res;
}
/* Y <- x*A + Y for potentially huge vectors. BLAS axpy handles
   n < INT_MAX; beyond that an OpenMP loop is used: a plain add when
   x == 1, a fused multiply-add otherwise. The pragmas bind to the
   for-loops following the if/else, so the missing braces are deliberate. */
void taxpy_large(real_t *restrict A, real_t x, real_t *restrict Y, size_t n, int nthreads)
{
    if (n < (size_t)INT_MAX)
        cblas_taxpy((int)n, x, A, 1, Y, 1);
    else {
        nthreads = cap_to_4(nthreads);
        #if defined(_OPENMP) && \
                    ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                      || defined(_WIN32) || defined(_WIN64) \
                    )
        long long ix;
        #endif
        if (x == 1.)
            #pragma omp parallel for schedule(static) num_threads(nthreads) shared(A, Y, n)
            for (size_t_for ix = 0; ix < n; ix++)
                Y[ix] += A[ix];
        else
            #pragma omp parallel for schedule(static) num_threads(nthreads) shared(A, x, Y, n)
            for (size_t_for ix = 0; ix < n; ix++)
                Y[ix] = fma_t(x, A[ix], Y[ix]);
    }
}
/* arr <- alpha*arr for potentially huge vectors; no-op when alpha == 1.
   Uses BLAS scal below the 32-bit index limit, OpenMP above it. */
void tscal_large(real_t *restrict arr, real_t alpha, size_t n, int nthreads)
{
    if (alpha == 1.)
        return;
    if (n < (size_t)INT_MAX)
        cblas_tscal((int)n, alpha, arr, 1);
    else {
        nthreads = cap_to_4(nthreads);
        #if defined(_OPENMP) && \
                    ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                      || defined(_WIN32) || defined(_WIN64) \
                    )
        long long ix;
        #endif
        #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, alpha, n)
        for (size_t_for ix = 0; ix < n; ix++)
            arr[ix] *= alpha;
    }
}
/* Xoshiro256++ and Xoshiro128++
https://prng.di.unimi.it */
static inline uint64_t splitmix64(const uint64_t seed)
{
    /* One SplitMix64 step (Steele et al.): used to expand a user-supplied
       seed into well-mixed xoshiro state words. */
    uint64_t x = seed + UINT64_C(0x9e3779b97f4a7c15);
    x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
    x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
    x ^= x >> 31;
    return x;
}
#ifndef USE_XOSHIRO128
static inline uint64_t rotl64(const uint64_t x, const int k) {
    /* Rotate x left by k bits; callers pass 0 < k < 64. */
    return (x >> (64 - k)) | (x << k);
}
static inline uint64_t xoshiro256pp(uint64_t state[4])
{
    /* One step of xoshiro256++: returns the next 64-bit value and
       advances 'state' in place. Rotations are written out inline. */
    const uint64_t mix = state[0] + state[3];
    const uint64_t out = ((mix << 23) | (mix >> 41)) + state[0];
    const uint64_t shifted = state[1] << 17;

    state[2] ^= state[0];
    state[3] ^= state[1];
    state[1] ^= state[2];
    state[0] ^= state[3];
    state[2] ^= shifted;
    state[3] = (state[3] << 45) | (state[3] >> 19);
    return out;
}
/* Advance the xoshiro256++ state by 2^128 steps using the published jump
   polynomial, producing a stream guaranteed not to overlap the original.
   Used to hand independent sub-streams to parallel workers. */
static inline void xoshiro256pp_jump(uint64_t state[4])
{
    /* Jump polynomial coefficients from the reference implementation --
       do not modify. */
    const uint64_t JUMP[] = { 0x180ec6d33cfd0aba, 0xd5a61266f0c9392c,
                              0xa9582618e03fc9aa, 0x39abdc4529b1661c };
    uint64_t s0 = 0;
    uint64_t s1 = 0;
    uint64_t s2 = 0;
    uint64_t s3 = 0;
    for (int i = 0; i < (int)(sizeof (JUMP) / sizeof (*JUMP)); i++)
    {
        for (int b = 0; b < 64; b++)
        {
            if (JUMP[i] & UINT64_C(1) << b)
            {
                s0 ^= state[0];
                s1 ^= state[1];
                s2 ^= state[2];
                s3 ^= state[3];
            }
            xoshiro256pp(state);
        }
    }
    state[0] = s0;
    state[1] = s1;
    state[2] = s2;
    state[3] = s3;
}
#else
static inline uint32_t rotl32(const uint32_t x, const int k) {
    /* Rotate x left by k bits; callers pass 0 < k < 32. */
    return (x >> (32 - k)) | (x << k);
}
static inline uint32_t xoshiro128pp(uint32_t state[4])
{
    /* One step of xoshiro128++: returns the next 32-bit value and
       advances 'state' in place. Rotations are written out inline. */
    const uint32_t mix = state[0] + state[3];
    const uint32_t out = ((mix << 7) | (mix >> 25)) + state[0];
    const uint32_t shifted = state[1] << 9;

    state[2] ^= state[0];
    state[3] ^= state[1];
    state[1] ^= state[2];
    state[0] ^= state[3];
    state[2] ^= shifted;
    state[3] = (state[3] << 11) | (state[3] >> 21);
    return out;
}
/* Advance the xoshiro128++ state by 2^64 steps using the published jump
   polynomial, producing a non-overlapping sub-stream for parallel use. */
static inline void xoshiro128pp_jump(uint32_t state[4])
{
    /* Jump polynomial coefficients from the reference implementation --
       do not modify. */
    const uint32_t JUMP[] = { 0x8764000b, 0xf542d2d3,
                              0x6fa035c3, 0x77f2db5b };
    uint32_t s0 = 0;
    uint32_t s1 = 0;
    uint32_t s2 = 0;
    uint32_t s3 = 0;
    for(int i = 0; i < (int)(sizeof (JUMP) / sizeof (*JUMP)); i++)
    {
        for(int b = 0; b < 32; b++)
        {
            if (JUMP[i] & UINT32_C(1) << b)
            {
                s0 ^= state[0];
                s1 ^= state[1];
                s2 ^= state[2];
                s3 ^= state[3];
            }
            xoshiro128pp(state);
        }
    }
    state[0] = s0;
    state[1] = s1;
    state[2] = s2;
    state[3] = s3;
}
#endif
/* Note: for double precision, this uses the Box-Muller transform
with raw form, which is less efficient than the polar form.
Nevertheless, from some experiments, this seems to give slightly better
end results when using double precision, even though it is slower and
loses more numeric precision by boxing to [0, 1] instead of [-1, 1].
For single precision, the polar form tended to give better results.
Note: if generating a uniform random number ~ (0,1), dividing
a random draw by the maximum will not result in a uniform
distribution, as the upper possible numbers are not evenly-spaced.
In these cases, it's necessary to take something up to 2^53 as
this is the interval that's evenly-representable. */
#if defined(USE_DOUBLE) || !(defined(USE_FLOAT) && defined(USE_XOSHIRO128))
/* Fill seq[0..n) with normal draws, scaled by 2^-7, via the Box-Muller
   transform (raw form -- see the long note above). Two uniforms in (0,1]
   are built from 53 random mantissa bits each (or a UINT64_MAX division
   fallback on exotic FP formats); pairs of outputs come from cos/sin of
   the same angle, with a separate draw for an odd trailing element.
   Under USE_XOSHIRO128 each 53-bit uniform is assembled from two 32-bit
   generator outputs, placing the masked word according to endianness. */
void rnorm_xoshiro(real_t *seq, const size_t n, rng_state_t state[4])
{
    #ifndef USE_XOSHIRO128
    const uint64_t two53_i = (UINT64_C(1) << 53) - UINT64_C(1);
    #endif
    const double twoPI = 2. * M_PI;
    uint64_t rnd1, rnd2;
    #ifdef USE_XOSHIRO128
    uint32_t rnd11, rnd12, rnd21, rnd22;
    const uint32_t two21_i = (UINT32_C(1) << 21) - UINT32_C(1);
    const uint32_t ONE = 1;
    const bool is_little_endian = *((unsigned char*)&ONE) != 0;
    #endif
    double u, v;
    size_t n_ = n / (size_t)2;
    for (size_t ix = 0; ix < n_; ix++)
    {
        /* Re-draw until both uniforms are nonzero (log(0) is -inf). */
        do
        {
            #ifdef USE_XOSHIRO128
            rnd11 = xoshiro128pp(state);
            rnd12 = xoshiro128pp(state);
            rnd21 = xoshiro128pp(state);
            rnd22 = xoshiro128pp(state);
            #else
            rnd1 = xoshiro256pp(state);
            rnd2 = xoshiro256pp(state);
            #endif
            #if defined(DBL_MANT_DIG) && (DBL_MANT_DIG == 53) &&(FLT_RADIX == 2)
            #ifdef USE_XOSHIRO128
            if (is_little_endian) {
                rnd12 = rnd12 & two21_i;
                rnd22 = rnd22 & two21_i;
            } else {
                rnd11 = rnd11 & two21_i;
                rnd21 = rnd21 & two21_i;
            }
            memcpy((char*)&rnd1, &rnd11, sizeof(uint32_t));
            memcpy((char*)&rnd1 + sizeof(uint32_t), &rnd12, sizeof(uint32_t));
            memcpy((char*)&rnd2, &rnd21, sizeof(uint32_t));
            memcpy((char*)&rnd2 + sizeof(uint32_t), &rnd22, sizeof(uint32_t));
            u = ldexp((double)rnd1, -53);
            v = ldexp((double)rnd2, -53);
            #else
            u = ldexp((double)(rnd1 & two53_i), -53);
            v = ldexp((double)(rnd2 & two53_i), -53);
            #endif
            #else
            u = (double)rnd1 / (double)UINT64_MAX;
            v = (double)rnd2 / (double)UINT64_MAX;
            #endif
        }
        while (u == 0 || v == 0);
        u = sqrt(-2. * log(u));
        seq[(size_t)2*ix] = (real_t)ldexp(cos(twoPI * v) * u, -7);
        seq[(size_t)2*ix + (size_t)1] = (real_t)ldexp(sin(twoPI * v) * u, -7);
    }
    /* Odd n: one extra draw for the last element (sin branch discarded). */
    if ((n % (size_t)2) != 0)
    {
        do
        {
            #ifdef USE_XOSHIRO128
            rnd11 = xoshiro128pp(state);
            rnd12 = xoshiro128pp(state);
            rnd21 = xoshiro128pp(state);
            rnd22 = xoshiro128pp(state);
            #else
            rnd1 = xoshiro256pp(state);
            rnd2 = xoshiro256pp(state);
            #endif
            #if defined(DBL_MANT_DIG) && (DBL_MANT_DIG == 53) &&(FLT_RADIX == 2)
            #ifdef USE_XOSHIRO128
            if (is_little_endian) {
                rnd12 = rnd12 & two21_i;
                rnd22 = rnd22 & two21_i;
            } else {
                rnd11 = rnd11 & two21_i;
                rnd21 = rnd21 & two21_i;
            }
            memcpy((char*)&rnd1, &rnd11, sizeof(uint32_t));
            memcpy((char*)&rnd1 + sizeof(uint32_t), &rnd12, sizeof(uint32_t));
            memcpy((char*)&rnd2, &rnd21, sizeof(uint32_t));
            memcpy((char*)&rnd2 + sizeof(uint32_t), &rnd22, sizeof(uint32_t));
            u = ldexp((double)rnd1, -53);
            v = ldexp((double)rnd2, -53);
            #else
            u = ldexp((double)(rnd1 & two53_i), -53);
            v = ldexp((double)(rnd2 & two53_i), -53);
            #endif
            #else
            u = (double)rnd1 / (double)UINT64_MAX;
            v = (double)rnd2 / (double)UINT64_MAX;
            #endif
        }
        while (u == 0 || v == 0);
        u = sqrt(-2. * log(u));
        seq[n - (size_t)1] = (real_t)ldexp(cos(twoPI * v) * u, -7);
    }
}
#else
/* Single-precision variant: fills seq[0..n) with normal draws scaled by
   2^-7 using the Marsaglia polar form of Box-Muller (see note above).
   Uniforms in [-1, 1) are built from 25 random bits re-centered around
   zero; pairs are rejected until they fall strictly inside the unit
   circle (and are nonzero). */
void rnorm_xoshiro(float *seq, const size_t n, rng_state_t state[4])
{
    const uint32_t two25_i = (UINT32_C(1) << 25) - UINT32_C(1);
    const int32_t two24_i = (UINT32_C(1) << 24);
    uint32_t rnd1, rnd2;
    #ifndef USE_XOSHIRO128
    uint64_t rnd0;
    #endif
    float u, v, s;
    size_t n_ = n / (size_t)2;
    for (size_t ix = 0; ix < n_; ix++)
    {
        do
        {
            #ifdef USE_XOSHIRO128
            rnd1 = xoshiro128pp(state);
            rnd2 = xoshiro128pp(state);
            #else
            /* One 64-bit output supplies both 32-bit words. */
            rnd0 = xoshiro256pp(state);
            memcpy(&rnd1, (char*)&rnd0, sizeof(uint32_t));
            memcpy(&rnd2, (char*)&rnd0 + sizeof(uint32_t), sizeof(uint32_t));
            #endif
            #if defined(FLT_MANT_DIG) && (FLT_MANT_DIG == 24) &&(FLT_RADIX == 2)
            u = ldexpf((float)((int32_t)(rnd1 & two25_i) - two24_i), -24);
            v = ldexpf((float)((int32_t)(rnd2 & two25_i) - two24_i), -24);
            #else
            u = (float)rnd1 / (float)INT32_MAX;
            v = (float)rnd2 / (float)INT32_MAX;
            #endif
            s = square(u) + square(v);
        }
        while (s == 0 || s >= 1);
        s = sqrtf((-2.0f / s) * logf(s));
        seq[(size_t)2*ix] = ldexpf(u * s, -7);
        seq[(size_t)2*ix + (size_t)1] = ldexpf(v * s, -7);
    }
    /* Odd n: one extra rejection loop for the final element. */
    if ((n % (size_t)2) != 0)
    {
        do
        {
            #ifdef USE_XOSHIRO128
            rnd1 = xoshiro128pp(state);
            rnd2 = xoshiro128pp(state);
            #else
            rnd0 = xoshiro256pp(state);
            memcpy(&rnd1, (char*)&rnd0, sizeof(uint32_t));
            memcpy(&rnd2, (char*)&rnd0 + sizeof(uint32_t), sizeof(uint32_t));
            #endif
            #if defined(FLT_MANT_DIG) && (FLT_MANT_DIG == 24) &&(FLT_RADIX == 2)
            u = ldexpf((float)((int32_t)(rnd1 & two25_i) - two24_i), -24);
            v = ldexpf((float)((int32_t)(rnd2 & two25_i) - two24_i), -24);
            #else
            u = (float)rnd1 / (float)INT32_MAX;
            v = (float)rnd2 / (float)INT32_MAX;
            #endif
            s = square(u) + square(v);
        }
        while (s == 0 || s >= 1);
        s = sqrtf((-2.0f / s) * logf(s));
        seq[n - (size_t)1] = ldexpf(u * s, -7);
    }
}
#endif
/* Expand an integer seed into a full RNG state by chaining SplitMix64.
   For xoshiro128++ the two 64-bit outputs are split into four 32-bit
   state words via memcpy (word order follows host byte order --
   deterministic per platform, not portable across endianness). */
void seed_state(int_t seed, rng_state_t state[4])
{
    #ifdef USE_XOSHIRO128
    uint64_t s1 = splitmix64(seed);
    uint64_t s2 = splitmix64(s1);
    memcpy(state, &s1, sizeof(uint64_t));
    memcpy(&state[2], &s2, sizeof(uint64_t));
    #else
    state[0] = splitmix64(seed);
    state[1] = splitmix64(state[0]);
    state[2] = splitmix64(state[1]);
    state[3] = splitmix64(state[2]);
    #endif
}
void fill_rnorm_buckets
(
    const size_t n_buckets, real_t *arr, const size_t n,
    real_t **ptr_bucket, size_t *sz_bucket, const size_t BUCKET_SIZE
)
{
    /* Point each bucket at consecutive BUCKET_SIZE-sized slices of 'arr'.
       Only the final bucket's size is written here (the remainder);
       the caller pre-fills the others with BUCKET_SIZE. */
    if (n_buckets == 0 || n == 0) return;
    for (size_t b = 0; b < n_buckets; b++)
        ptr_bucket[b] = arr + b*BUCKET_SIZE;
    sz_bucket[n_buckets - (size_t)1] = n - BUCKET_SIZE*(n_buckets - (size_t)1);
}
void rnorm_singlethread(ArraysToFill arrays, rng_state_t state[4])
{
    /* Fill both target arrays sequentially from a single RNG stream. */
    if (arrays.sizeA != 0)
        rnorm_xoshiro(arrays.A, arrays.sizeA, state);
    if (arrays.sizeB != 0)
        rnorm_xoshiro(arrays.B, arrays.sizeB, state);
}
/* This function generates random normal numbers in parallel, but dividing the
arrays to fill into buckets of up to 250k each. It uses the jumping technique
from the Xorshiro family in order to ensure that the generated numbers will
not overlap. */
/* Fill arrays.A / arrays.B with normal draws (scaled by 2^-7, see
   rnorm_xoshiro) in parallel, by slicing them into buckets of up to
   250k values; each bucket is generated from its own jumped RNG stream
   so the sub-sequences cannot overlap.
   Returns 0 on success, 1 on allocation failure. */
int_t rnorm_parallel(ArraysToFill arrays, int_t seed, int nthreads)
{
    #ifdef USE_R_RNG
    /* When embedded in R, defer entirely to R's own RNG (serial). */
    GetRNGstate();
    for (size_t ix = 0; ix < arrays.sizeA; ix++)
        arrays.A[ix] = norm_rand();
    for (size_t ix = 0; ix < arrays.sizeB; ix++)
        arrays.B[ix] = norm_rand();
    PutRNGstate();
    return 0;
    #endif
    const size_t BUCKET_SIZE = (size_t)250000;
    rng_state_t initial_state[4];
    seed_state(seed, initial_state);
    if (arrays.sizeA + arrays.sizeB < BUCKET_SIZE)
    {
        rnorm_singlethread(arrays, initial_state);
        return 0;
    }
    /* Bucket counts = ceil(size / BUCKET_SIZE). The parentheses around the
       '!=' comparison are required: '+' binds tighter than '!=', so without
       them the whole expression collapses to a 0/1 boolean. */
    const size_t buckA = arrays.sizeA / BUCKET_SIZE
                            + ((arrays.sizeA % BUCKET_SIZE) != 0);
    const size_t buckB = arrays.sizeB / BUCKET_SIZE
                            + ((arrays.sizeB % BUCKET_SIZE) != 0);
    const size_t tot_buckets = buckA + buckB;
    real_t **ptr_bucket = (real_t**)malloc(tot_buckets*sizeof(real_t*));
    size_t *sz_bucket = (size_t*)malloc(tot_buckets*sizeof(size_t));
    rng_state_t *states = (rng_state_t*)malloc((size_t)4*tot_buckets*sizeof(rng_state_t));
    if (ptr_bucket == NULL || sz_bucket == NULL || states == NULL)
    {
        free(ptr_bucket);
        free(sz_bucket);
        free(states);
        return 1;
    }
    /* Every bucket is full-sized except the last one of each array,
       which fill_rnorm_buckets fixes up below. */
    for (size_t ix = 0; ix < tot_buckets; ix++)
        sz_bucket[ix] = BUCKET_SIZE;
    /* Bucket k's state is the initial state jumped k times. */
    memcpy(states, initial_state, 4*sizeof(rng_state_t));
    for (size_t ix = 1; ix < tot_buckets; ix++)
    {
        memcpy(states + (size_t)4*ix, states + (size_t)4*(ix-(size_t)1), 4*sizeof(rng_state_t));
        #ifdef USE_XOSHIRO128
        xoshiro128pp_jump(states + 4*ix);
        #else
        xoshiro256pp_jump(states + 4*ix);
        #endif
    }
    real_t ** const ptr_bucket_ = ptr_bucket;
    size_t * const sz_bucket_ = sz_bucket;
    fill_rnorm_buckets(
        buckA, arrays.A, arrays.sizeA,
        ptr_bucket, sz_bucket, BUCKET_SIZE
    );
    ptr_bucket += buckA; sz_bucket += buckA;
    fill_rnorm_buckets(
        buckB, arrays.B, arrays.sizeB,
        ptr_bucket, sz_bucket, BUCKET_SIZE
    );
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(states)
    for (size_t_for ix = 0; ix < tot_buckets; ix++)
    {
        /* Private copy of this bucket's state: rnorm_xoshiro mutates it. */
        rng_state_t state[] = {states[(size_t)4*ix],
                               states[(size_t)4*ix + (size_t)1],
                               states[(size_t)4*ix + (size_t)2],
                               states[(size_t)4*ix + (size_t)3]};
        rnorm_xoshiro(ptr_bucket_[ix], sz_bucket_[ix], state);
    }
    free(ptr_bucket_);
    free(sz_bucket_);
    free(states);
    return 0;
}
/* Reduce 'nthreads' thread-local copies of an m-x-n accumulator, stored
   contiguously one after another in 'inp', into 'outp' (leading dimension
   lda). When n == 1 and lda == 0 the data is treated as plain vectors. */
void reduce_mat_sum(real_t *restrict outp, size_t lda, real_t *restrict inp,
                    int_t m, int_t n, int nthreads)
{
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    /* Cast before multiplying: computing 'm * n' in int_t arithmetic can
       overflow for large matrices even though the product fits in size_t. */
    size_t m_by_n = (size_t)m * (size_t)n;
    if (n > 1 || lda > 0)
        #pragma omp parallel for schedule(static) num_threads(nthreads) \
                shared(outp, inp, m, n, nthreads)
        for (size_t_for row = 0; row < (size_t)m; row++)
            for (size_t tid = 0; tid < (size_t)nthreads; tid++)
                for (size_t col = 0; col < (size_t)n; col++)
                    outp[col + row*lda] += inp[tid*m_by_n + col + row*n];
    else
        for (size_t tid = 0; tid < (size_t)nthreads; tid++)
            for (size_t row = 0; row < (size_t)m; row++)
                outp[row] += inp[tid*m_by_n + row];
}
/* In-place elementwise arr[ix] <- exp(-arr[ix]). */
void exp_neg_x(real_t *restrict arr, size_t n, int nthreads)
{
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, n)
    for (size_t_for ix = 0; ix < n; ix++)
        arr[ix] = exp_t(-arr[ix]);
}
void add_to_diag(real_t *restrict A, real_t val, size_t n)
{
    /* Shift the diagonal of the n-x-n matrix A by 'val'; diagonal
       entries of a square matrix sit at stride n+1. */
    for (size_t i = 0; i < n; i++)
        A[i*(n + 1)] += val;
}
/* Returns sum(arr[ix]^2 / w[ix]); w entries are assumed nonzero.
   NOTE(review): the 'compensated' flag is accepted but not used in this
   implementation. */
real_t sum_sq_div_w(real_t *restrict arr, real_t *restrict w, size_t n, bool compensated, int nthreads)
{
    real_t res = 0;
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long ix;
    #endif
    #pragma omp parallel for schedule(static) num_threads(nthreads) shared(arr, w, n) reduction(+:res)
    for (size_t_for ix = 0; ix < n; ix++)
        res += square(arr[ix]) / w[ix];
    return res;
}
/* X <- alpha*A*B + X | A(m,k) is sparse CSR, B(k,n) is dense.
   Each output row accumulates one axpy per structural nonzero of the
   corresponding CSR row. The alpha==1 case is split out to skip the
   per-entry multiplication; the pragmas bind to the for-loops following
   the if/else, so the missing braces are deliberate. */
void tgemm_sp_dense
(
    int_t m, int_t n, real_t alpha,
    size_t indptr[], int_t indices[], real_t values[],
    real_t DenseMat[], size_t ldb,
    real_t OutputMat[], size_t ldc,
    int nthreads
)
{
    /* Early exit on an empty matrix (no rows, or zero stored entries). */
    if (m <= 0 || indptr[0] == indptr[m])
        return;
    real_t *row_ptr;
    #if defined(_OPENMP) && \
                ( (_OPENMP < 200801)  /* OpenMP < 3.0 */ \
                  || defined(_WIN32) || defined(_WIN64) \
                )
    long long row;
    #endif
    if (alpha != 1.)
        #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
                shared(m, n, alpha, ldb, ldc, OutputMat, DenseMat, indptr, indices, values) \
                private(row_ptr)
        for (size_t_for row = 0; row < (size_t)m; row++) {
            row_ptr = OutputMat + row*ldc;
            for (size_t col = indptr[row]; col < indptr[row+1]; col++)
                cblas_taxpy(n, alpha*values[col], DenseMat + (size_t)indices[col]*ldb, 1, row_ptr, 1);
        }
    else
        #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
                shared(m, n, ldb, ldc, OutputMat, DenseMat, indptr, indices, values) \
                private(row_ptr)
        for (size_t_for row = 0; row < (size_t)m; row++) {
            row_ptr = OutputMat + row*ldc;
            for (size_t col = indptr[row]; col < indptr[row+1]; col++)
                cblas_taxpy(n, values[col], DenseMat + (size_t)indices[col]*ldb, 1, row_ptr, 1);
        }
}
/* x <- alpha*t(A)*v + x | A[m,n] is dense, v[m] is sparse, x[n] is dense */
void tgemv_dense_sp
(
    int_t m, int_t n,
    real_t alpha, real_t DenseMat[], size_t lda,
    int_t ixB[], real_t vec_sp[], size_t nnz,
    real_t OutputVec[]
)
{
    /* One axpy per structural nonzero of v: entry k scales row ixB[k]
       of the dense matrix into the output. Unit alpha skips the scaling. */
    if (alpha == 1.)
        for (size_t k = 0; k < nnz; k++)
            cblas_taxpy(n, vec_sp[k], DenseMat + (size_t)ixB[k]*lda, 1, OutputVec, 1);
    else
        for (size_t k = 0; k < nnz; k++)
            cblas_taxpy(n, alpha*vec_sp[k], DenseMat + (size_t)ixB[k]*lda, 1, OutputVec, 1);
}
/* Same as tgemv_dense_sp, but with a per-entry weight array instead of a
   single scalar. */
void tgemv_dense_sp_weighted
(
    int_t m, int_t n,
    real_t alpha[], real_t DenseMat[], size_t lda,
    int_t ixB[], real_t vec_sp[], size_t nnz,
    real_t OutputVec[]
)
{
    for (size_t k = 0; k < nnz; k++) {
        const real_t coef = alpha[k] * vec_sp[k];
        cblas_taxpy(n, coef, DenseMat + (size_t)ixB[k]*lda, 1, OutputVec, 1);
    }
}
/* Same, but combining a per-entry weight array with an extra scalar
   weight applied to every entry. */
void tgemv_dense_sp_weighted2
(
    int_t m, int_t n,
    real_t alpha[], real_t alpha2, real_t DenseMat[], size_t lda,
    int_t ixB[], real_t vec_sp[], size_t nnz,
    real_t OutputVec[]
)
{
    for (size_t k = 0; k < nnz; k++) {
        const real_t coef = alpha2 * alpha[k] * vec_sp[k];
        cblas_taxpy(n, coef, DenseMat + (size_t)ixB[k]*lda, 1, OutputVec, 1);
    }
}
void tgemv_dense_sp_notrans
(
    int_t m, int_t n,
    real_t DenseMat[], int_t lda,
    int_t ixB[], real_t vec_sp[], size_t nnz,
    real_t OutputVec[]
)
{
    /* Non-transposed variant: each sparse entry v[k] scales *column*
       ixB[k] of the dense matrix (hence the stride-lda axpy). */
    for (size_t k = 0; k < nnz; k++)
        cblas_taxpy(m, vec_sp[k], DenseMat + ixB[k], lda, OutputVec, 1);
}
/* B[:m,:n] := A[:m,:n]. Contiguous matrices (both leading dimensions
   equal to n) are copied with a single memcpy; otherwise falls back to
   LAPACK ?lacpy with uplo='?' (any value other than 'U'/'L' makes lacpy
   copy the full rectangle -- presumably relied upon here; the dimensions
   are swapped to account for row-major vs column-major storage). */
void copy_mat
(
    int_t m, int_t n,
    real_t *restrict A, int_t lda,
    real_t *restrict B, int_t ldb
)
{
    char uplo = '?';
    if (m == 0 && n == 0) return;
    if (ldb == n && lda == n)
        memcpy(B, A, (size_t)m*(size_t)n*sizeof(real_t));
    else
        tlacpy_(&uplo, &n, &m, A, &lda, B, &ldb);
}
/* B[:m,:n] += A[:m,:n] (element-wise, arbitrary leading dimensions). */
void sum_mat
(
    size_t m, size_t n,
    real_t *restrict A, size_t lda,
    real_t *restrict B, size_t ldb
)
{
    /* Kept as plain loops on purpose: using axpy here takes a large
       performance hit under MKL (OpenBLAS is fine), and typically
       m >> n so this row-major traversal is the cache-friendly order. */
    for (size_t i = 0; i < m; i++)
    {
        const real_t *a_row = A + i*lda;
        real_t *b_row = B + i*ldb;
        for (size_t j = 0; j < n; j++)
            b_row[j] += a_row[j];
    }
}
void transpose_mat(real_t *restrict A, size_t m, size_t n, real_t *restrict buffer_real_t)
{
    /* In-place transpose of row-major A (m x n -> n x m), staging the
       original contents through the caller-provided scratch buffer. */
    memcpy(buffer_real_t, A, m*n*sizeof(real_t));
    for (size_t col = 0; col < n; col++)
        for (size_t row = 0; row < m; row++)
            A[row + col*m] = buffer_real_t[col + row*n];
}
void transpose_mat2(real_t *restrict A, size_t m, size_t n, real_t *restrict outp)
{
    /* outp (n x m, row-major) <- transpose of A (m x n, row-major). */
    for (size_t i = 0; i < m; i++)
    {
        const real_t *a_row = A + i*n;
        for (size_t j = 0; j < n; j++)
            outp[i + j*m] = a_row[j];
    }
}
void transpose_mat3
(
    real_t *restrict A, size_t lda,
    size_t m, size_t n,
    real_t *restrict outp, size_t ldb
)
{
    /* outp <- t(A), with arbitrary leading dimensions on both sides. */
    for (size_t i = 0; i < m; i++)
        for (size_t j = 0; j < n; j++)
            outp[i + j*ldb] = A[j + i*lda];
}
/* Allocate CSR arrays (row pointers, column indices, values, and weights
   when W is given) and convert the COO triplets into them via coo_to_csr.
   Returns 0 on success, 1 on allocation failure.
   NOTE(review): on failure the partially-allocated out-pointers are NOT
   freed here -- ownership rests with the caller, which must free them;
   also *csr_w is left untouched when W == NULL. Verify call sites. */
int_t coo_to_csr_plus_alloc
(
    int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval,
    real_t *restrict W,
    int_t m, int_t n, size_t nnz,
    size_t *restrict *csr_p, int_t *restrict *csr_i, real_t *restrict *csr_v,
    real_t *restrict *csr_w
)
{
    *csr_p = (size_t*)malloc(((size_t)m+(size_t)1)*sizeof(size_t));
    *csr_i = (int_t*)malloc(nnz*sizeof(int_t));
    *csr_v = (real_t*)malloc(nnz*sizeof(real_t));
    if (*csr_p == NULL || *csr_i == NULL || *csr_v == NULL)
        return 1;
    if (W != NULL) {
        *csr_w = (real_t*)malloc(nnz*sizeof(real_t));
        if (*csr_w == NULL) return 1;
    }
    coo_to_csr(
        Xrow, Xcol, Xval,
        W,
        m, n, nnz,
        *csr_p, *csr_i, *csr_v,
        (W == NULL)? ((real_t*)NULL) : (*csr_w)
    );
    return 0;
}
/* Convert a COO (triplet) matrix into CSR format, writing into
   caller-allocated arrays.
   Inputs : Xrow/Xcol/Xval -- nnz triplets; W -- optional per-entry
            weights (may be NULL); m/n -- matrix dimensions.
   Outputs: csr_p (row pointers, length m+1, always produced),
            csr_i/csr_v (length nnz), csr_w (written only if W != NULL).
   Two strategies: with a temporary per-row counter the entries are
   scattered in a stable forward pass; if that counter cannot be
   allocated, a fallback pass fills the arrays by decrementing csr_p in
   place (destroying it), then jumps back to 'produce_p' to rebuild the
   row pointers. */
void coo_to_csr
(
    int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval,
    real_t *restrict W,
    int_t m, int_t n, size_t nnz,
    size_t *restrict csr_p, int_t *restrict csr_i, real_t *restrict csr_v,
    real_t *restrict csr_w
)
{
    bool has_mem = true;
    int_t *cnt_byrow = NULL;
    produce_p:
    {
        /* count entries per row, then take the cumulative sum */
        memset(csr_p, 0, ((size_t)m+(size_t)1)*sizeof(size_t));
        for (size_t ix = 0; ix < nnz; ix++)
            csr_p[Xrow[ix]+(size_t)1]++;
        for (int_t row = 0; row < m; row++)
            csr_p[row+(size_t)1] += csr_p[row];
    }
    /* second arrival: csr_p was just rebuilt after the in-place
       fallback below, and all other arrays are already filled */
    if (!has_mem) goto cleanup;
    cnt_byrow = (int_t*)calloc(m, sizeof(int_t));
    if (cnt_byrow != NULL)
    {
        /* forward scatter: cnt_byrow[r] counts how many entries of
           row r have been placed so far */
        if (W == NULL)
            for (size_t ix = 0; ix < nnz; ix++) {
                csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
                csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
            }
        else
            for (size_t ix = 0; ix < nnz; ix++) {
                csr_w[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = W[ix];
                csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
                csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
            }
        goto cleanup;
    }
    else
    {
        /* no scratch memory: place entries by decrementing the row
           pointers themselves (this consumes csr_p) */
        if (W == NULL)
            for (size_t ix = 0; ix < nnz; ix++) {
                csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
                csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
            }
        else
            for (size_t ix = 0; ix < nnz; ix++) {
                csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
                csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
                csr_w[csr_p[Xrow[ix]+(size_t)1]] = W[ix];
            }
        has_mem = false;
        goto produce_p;  /* rebuild the row pointers consumed above */
    }
    cleanup:
        free(cnt_byrow);
}
/* Convert a COO (triplet) matrix into BOTH CSR and CSC formats in one
   pass over the inputs (see coo_to_csr for the meaning of the csr_*
   arrays; csc_* are the column-major counterparts of length n+1 / nnz).
   The CSR and CSC fills are independent, so they run as two OpenMP
   sections.  Uses the same counter-based strategy with an in-place,
   pointer-consuming fallback as coo_to_csr. */
void coo_to_csr_and_csc
(
    int_t *restrict Xrow, int_t *restrict Xcol, real_t *restrict Xval,
    real_t *restrict W, int_t m, int_t n, size_t nnz,
    size_t *restrict csr_p, int_t *restrict csr_i, real_t *restrict csr_v,
    size_t *restrict csc_p, int_t *restrict csc_i, real_t *restrict csc_v,
    real_t *restrict csr_w, real_t *restrict csc_w,
    int nthreads
)
{
    bool has_mem = true;
    /* at most two threads are useful here: one per section */
    nthreads = (nthreads > 2)? 2 : 1;
    int_t *cnt_byrow = NULL;
    int_t *cnt_bycol = NULL;
    produce_p:
    {
        /* count entries per row and per column, then cumulative sums */
        memset(csr_p, 0, ((size_t)m+(size_t)1)*sizeof(size_t));
        memset(csc_p, 0, ((size_t)n+(size_t)1)*sizeof(size_t));
        for (size_t ix = 0; ix < nnz; ix++) {
            csr_p[Xrow[ix]+(size_t)1]++;
            csc_p[Xcol[ix]+(size_t)1]++;
        }
        for (int_t row = 0; row < m; row++)
            csr_p[row+(size_t)1] += csr_p[row];
        for (int_t col = 0; col < n; col++)
            csc_p[col+(size_t)1] += csc_p[col];
    }
    /* second arrival: both pointer arrays rebuilt after the fallback */
    if (!has_mem) goto cleanup;
    cnt_byrow = (int_t*)calloc(m, sizeof(int_t));
    cnt_bycol = (int_t*)calloc(n, sizeof(int_t));
    #if defined(_OPENMP) && (_OPENMP > 201305) /* OpenMP >= 4.0 */
    /* allow the two sections to run in parallel if nested */
    omp_set_max_active_levels(2);
    #endif
    if (cnt_byrow != NULL && cnt_bycol != NULL) {
        #pragma omp parallel sections num_threads(nthreads)
        {
            /* CSR fill: stable forward scatter */
            #pragma omp section
            {
                if (W == NULL)
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
                        csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
                    }
                else
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csr_w[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = W[ix];
                        csr_v[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]] = Xval[ix];
                        csr_i[csr_p[Xrow[ix]] + cnt_byrow[Xrow[ix]]++] = Xcol[ix];
                    }
            }
            /* CSC fill: stable forward scatter */
            #pragma omp section
            {
                if (W == NULL)
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csc_v[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]] = Xval[ix];
                        csc_i[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]++] = Xrow[ix];
                    }
                else
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csc_w[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]] = W[ix];
                        csc_v[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]] = Xval[ix];
                        csc_i[csc_p[Xcol[ix]] + cnt_bycol[Xcol[ix]]++] = Xrow[ix];
                    }
            }
        }
        goto cleanup;
    }
    else {
        /* fallback: fill by decrementing the pointer arrays in place,
           then rebuild them via 'produce_p' */
        #pragma omp parallel sections num_threads(nthreads)
        {
            #pragma omp section
            {
                if (W == NULL)
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
                        csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
                    }
                else
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csr_i[--csr_p[Xrow[ix]+(size_t)1]] = Xcol[ix];
                        csr_v[csr_p[Xrow[ix]+(size_t)1]] = Xval[ix];
                        csr_w[csr_p[Xrow[ix]+(size_t)1]] = W[ix];
                    }
            }
            #pragma omp section
            {
                if (W == NULL)
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csc_i[--csc_p[Xcol[ix]+(size_t)1]] = Xrow[ix];
                        csc_v[csc_p[Xcol[ix]+(size_t)1]] = Xval[ix];
                    }
                else
                    for (size_t ix = 0; ix < nnz; ix++) {
                        csc_i[--csc_p[Xcol[ix]+(size_t)1]] = Xrow[ix];
                        csc_v[csc_p[Xcol[ix]+(size_t)1]] = Xval[ix];
                        csc_w[csc_p[Xcol[ix]+(size_t)1]] = W[ix];
                    }
            }
        }
        has_mem = false;
        goto produce_p;
    }
    cleanup:
        free(cnt_byrow);
        free(cnt_bycol);
}
/* Compute the mean of the stored values in each row of a CSR matrix.
   indptr  : CSR row pointers, length m+1
   values  : CSR values array (read-only; see fix below)
   output  : length-m array receiving the per-row means
   m       : number of rows
   nthreads: OpenMP thread count (capped to 4 for the division pass)
   NOTE(review): a row with zero stored entries divides by zero in the
   second pass -- presumably callers guarantee no empty rows; confirm. */
void row_means_csr(size_t indptr[], real_t *restrict values,
                   real_t *restrict output, int_t m, int nthreads)
{
    int_t row = 0;
    /* Fix: the original called set_to_zero(values, m), wiping the first
       'm' entries of the matrix DATA before summing them.  The intent
       (and the only array this function owns) is 'output'. */
    set_to_zero(output, m);
    #pragma omp parallel for schedule(dynamic) num_threads(nthreads) \
            shared(indptr, values, output, m)
    for (row = 0; row < m; row++)
    {
        /* accumulate in double regardless of real_t for extra precision */
        double rsum = 0;
        for (size_t ix = indptr[row]; ix < indptr[row+(size_t)1]; ix++)
            rsum += values[ix];
        output[row] = rsum;
    }
    nthreads = cap_to_4(nthreads);
    #pragma omp parallel for schedule(static) num_threads(nthreads) \
            shared(indptr, output, m)
    for (row = 0; row < m; row++)
        output[row] /= (real_t)(indptr[row+(size_t)1] - indptr[row]);
}
/* Set (under an OpenMP critical section) when an interrupt is received;
   polled by long-running routines (e.g. the L-BFGS printer callbacks
   below) to abort early. */
bool should_stop_procedure = false;
/* NOTE(review): not referenced in this chunk; presumably marks the
   model handle as in-use -- confirm against the rest of the file. */
bool handle_is_locked = false;
/* Interrupt callback: raises the global stop flag so in-progress
   procedures terminate early.  The code 's' is ignored.
   (The "interrup" typo is kept: external code references this symbol.) */
void set_interrup_global_variable(int_t s)
{
    /* serialize the write with any other thread entering this
       critical section */
    #pragma omp critical
    {
        should_stop_procedure = true;
    }
}
/* Progress callback for the L-BFGS optimizer (collective-model
   objective): records the iteration count in the shared data struct,
   prints progress every 'print_every' iterations, and returns non-zero
   to request an early stop once an interrupt has been flagged. */
int_t lbfgs_printer_collective
(
    void *instance,
    const real_t *x,
    const real_t *g,
    const real_t fx,
    const real_t xnorm,
    const real_t gnorm,
    const real_t step,
    size_t n,
    int_t k,
    int_t ls
)
{
    ((data_collective_fun_grad*)instance)->niter = k;
    int_t print_every = ((data_collective_fun_grad*)instance)->print_every;
    /* Fix: test print_every > 0 BEFORE the modulo -- the original
       evaluated 'k % print_every' first, which is undefined behavior
       (division by zero) when printing is disabled (print_every == 0). */
    if (print_every > 0 && (k % print_every) == 0) {
        printf("Iteration %-4d - f(x)= %-8.03g - ||g(x)||= %-8.03g - ls=% 2d\n",
               k, fx, gnorm, ls);
        fflush(stdout);
    }
    if (should_stop_procedure)
        return 1;
    return 0;
}
/* Progress callback for the L-BFGS optimizer (offsets-model objective);
   same contract as lbfgs_printer_collective. */
int_t lbfgs_printer_offsets
(
    void *instance,
    const real_t *x,
    const real_t *g,
    const real_t fx,
    const real_t xnorm,
    const real_t gnorm,
    const real_t step,
    size_t n,
    int_t k,
    int_t ls
)
{
    ((data_offsets_fun_grad*)instance)->niter = k;
    int_t print_every = ((data_offsets_fun_grad*)instance)->print_every;
    /* Fix: check print_every > 0 before the modulo; 'k % 0' is
       undefined behavior when printing is disabled. */
    if (print_every > 0 && (k % print_every) == 0) {
        printf("Iteration %-5d - f(x)= %-8.03g - ||g(x)||= %-8.03g - ls=% 2d\n",
               k, fx, gnorm, ls);
        fflush(stdout);
    }
    if (should_stop_procedure)
        return 1;
    return 0;
}
/* Returns true if arr[0..n-1] is in non-decreasing order
   (trivially true for n <= 1, since the loop body never runs). */
bool check_is_sorted(int_t arr[], int_t n)
{
    for (int_t ix = 1; ix < n; ix++)
        if (arr[ix-1] > arr[ix])
            return false;
    return true;
}
/* https://www.stat.cmu.edu/~ryantibs/median/quickselect.c */
/* Some sample C code for the quickselect algorithm,
   taken from Numerical Recipes in C. */
/* swap two scalars through the 'temp' variable in scope */
#define SWAP(a,b) temp=(a);(a)=(b);(b)=temp;
/* Quickselect over INDICES: rearranges arr[0..n-1] (indices into
   'values') so that arr[k] indexes the (k+1)-th LARGEST value, with
   indices of larger-or-equal values to its left and smaller-or-equal
   to its right.  Comparisons are inverted relative to the classic
   ascending quickselect.  'values' itself is never modified. */
void qs_argpartition(int_t arr[], real_t values[], int_t n, int_t k)
{
    int_t i,ir,j,l,mid;
    int_t a,temp;
    l=0;
    ir=n-1;
    for(;;) {
        if (ir <= l+1) {
            /* partition of size <= 2: order the pair and finish */
            if (ir == l+1 && values[arr[ir]] > values[arr[l]]) {
                SWAP(arr[l],arr[ir]);
            }
            return;
        }
        else {
            /* median-of-three pivot selection, pivot left in arr[l+1] */
            mid=(l+ir) >> 1;
            SWAP(arr[mid],arr[l+1]);
            if (values[arr[l]] < values[arr[ir]]) {
                SWAP(arr[l],arr[ir]);
            }
            if (values[arr[l+1]] < values[arr[ir]]) {
                SWAP(arr[l+1],arr[ir]);
            }
            if (values[arr[l]] < values[arr[l+1]]) {
                SWAP(arr[l],arr[l+1]);
            }
            i=l+1;
            j=ir;
            a=arr[l+1];  /* pivot (an index into 'values') */
            for (;;) {
                /* scan from both ends until pointers cross */
                do i++; while (values[arr[i]] > values[a]);
                do j--; while (values[arr[j]] < values[a]);
                if (j < i) break;
                SWAP(arr[i],arr[j]);
            }
            arr[l+1]=arr[j];
            arr[j]=a;
            /* keep only the side of the partition containing k */
            if (j >= k) ir=j-1;
            if (j <= k) l=i;
        }
    }
}
/* Copy 'orig' (m-by-n, row-major) into 'outp' (m-by-(n+1), row-major)
   and fill the extra last column with ones (intercept/bias column). */
void append_ones_last_col
(
    real_t *restrict orig, size_t m, size_t n,
    real_t *restrict outp
)
{
    copy_mat(m, n,
             orig, n,
             outp, n+1);
    /* walk down the last column of the widened matrix */
    real_t *restrict last_col = outp + n;
    for (size_t row = 0; row < m; row++, last_col += n+(size_t)1)
        *last_col = 1.;
}
/* Mirror the upper triangle of the n-by-n matrix A (leading dimension
   lda, row-major) into its lower triangle, making it symmetric. */
void fill_lower_triangle(real_t A[], size_t n, size_t lda)
{
    for (size_t col = 0; col < n; col++)
        for (size_t row = col+1; row < n; row++)
            A[col + row*lda] = A[row + col*lda];
}
/* Print an error message to stderr and flush it immediately.
   Fix: the _FOR_R build passed 'msg' directly as the printf format
   string, so any '%' in the message would be interpreted as a format
   specifier (undefined behavior / format-string bug).  Both builds now
   route the text through "%s". */
void print_err_msg(const char *msg)
{
    fprintf(stderr, "%s", msg);
    fflush(stderr);
}
/* Print the standard out-of-memory error message to stderr. */
void print_oom_message(void)
{
    print_err_msg("Error: could not allocate enough memory.\n");
}
#ifdef _FOR_PYTHON
/* Maximum length (including NUL) of a message routed to Python. */
#define PY_MSG_MAX_LENGTH 256
/* printf-style wrapper: formats into a fixed buffer (silently
   truncating at PY_MSG_MAX_LENGTH) and forwards the text to Python's
   stdout through the 'cy_printf' bridge. */
void py_printf(const char *fmt, ...)
{
    char msg[PY_MSG_MAX_LENGTH];
    va_list args;
    va_start(args, fmt);
    vsnprintf(msg, PY_MSG_MAX_LENGTH, fmt, args);
    va_end(args);
    cy_printf(msg);
}
/* Same as py_printf but routed to Python's stderr via 'cy_errprintf'.
   The first argument is ignored (NOTE(review): presumably kept to
   match a callback signature -- confirm against callers). */
void py_errprintf(void *ignored, const char *fmt, ...)
{
    char msg[PY_MSG_MAX_LENGTH];
    va_list args;
    va_start(args, fmt);
    vsnprintf(msg, PY_MSG_MAX_LENGTH, fmt, args);
    va_end(args);
    cy_errprintf(msg);
}
/* Write an already-formatted message to Python's sys.stdout. */
void python_printmsg(char *msg)
{
    PySys_WriteStdout("%s", msg);
}
/* Write an already-formatted message to Python's sys.stderr. */
void python_printerrmsg(char *msg)
{
    PySys_WriteStderr("%s", msg);
}
#endif
/* React to a procedure's return status.  A retval of 3 means the
   procedure was interrupted: optionally print a message, and if the
   caller does not handle interrupts itself, re-raise SIGINT. */
void act_on_interrupt(int retval, bool handle_interrupt, bool print_msg)
{
    if (retval != 3)
        return;
    if (print_msg)
        print_err_msg(" Error: procedure was interrupted.\n");
    if (!handle_interrupt)
        raise(SIGINT);
}
#ifdef _FOR_R
/* Replace entries flagged by R's ISNAN macro (which matches both NA
   and NaN) with the plain C NAN constant, so downstream C code sees
   ordinary NaNs. */
void R_nan_to_C_nan(real_t arr[], size_t n)
{
    for (size_t ix = 0; ix < n; ix++)
        arr[ix] = ISNAN(arr[ix])? NAN : arr[ix];
}
#endif
/* Kahan compensated summation of arr[0..n-1] in long double precision:
   'err' carries the low-order bits lost by each addition and is
   subtracted from the next element, greatly reducing rounding error
   relative to a naive running sum.  The exact statement order below is
   the algorithm -- do not reorder. */
long double compensated_sum(real_t *arr, size_t n)
{
    long double err = 0.;
    long double diff = 0.;
    long double temp;
    long double res = 0;
    for (size_t ix = 0; ix < n; ix++)
    {
        diff = arr[ix] - err;      /* input corrected by previous error */
        temp = res + diff;         /* may lose low-order bits of diff */
        err = (temp - res) - diff; /* recover exactly the bits lost */
        res = temp;
    }
    return res;
}
/* Compensated (Kahan) dot product of arr1 and arr2 in long double
   precision.  Each product enters through fmal (fused multiply-add,
   a single rounding) with the previous step's error folded in; 'err'
   then captures the bits lost by the running addition.  Statement
   order is the algorithm -- do not reorder. */
long double compensated_sum_product(real_t *restrict arr1, real_t *restrict arr2, size_t n)
{
    long double err = 0.;
    long double diff = 0.;
    long double temp;
    long double res = 0;
    for (size_t ix = 0; ix < n; ix++)
    {
        diff = fmal(arr1[ix], arr2[ix], -err); /* product minus prior error */
        temp = res + diff;
        err = (temp - res) - diff;             /* bits lost by the addition */
        res = temp;
    }
    return res;
}
#ifdef AVOID_BLAS_SYR
/* https://github.com/xianyi/OpenBLAS/issues/3237 */
/* Drop-in replacement for BLAS 'syr': A += alpha * x * x', updating
   only the upper triangle of the row-major n-by-n matrix A with
   leading dimension lda (works around the OpenBLAS issue above). */
void custom_syr(const int_t n, const real_t alpha, const real_t *restrict x, real_t *restrict A, const int_t lda)
{
    for (int row = 0; row < n; row++) {
        const real_t scaled = alpha*x[row];
        real_t *restrict A_row = A + (size_t)row*(size_t)lda;
        for (int col = row; col < n; col++)
            A_row[col] = fma_t(scaled, x[col], A_row[col]);
    }
}
#endif
void set_blas_threads(int nthreads_set, int *nthreads_curr)
{
#ifdef _FOR_R
/* https://gist.github.com/KRD1/2503984 */
if (!has_RhpcBLASctl || ptr_glob_lst == NULL || ptr_nthreads == NULL)
return;
int errinfo = 0;
if (nthreads_curr != NULL) {
SEXP nthreads_curr_R = R_tryEvalSilent(VECTOR_ELT(*ptr_glob_lst, 5),
VECTOR_ELT(*ptr_glob_lst, 0),
&errinfo);
if (!errinfo) {
*nthreads_curr = Rf_asInteger(nthreads_curr_R);
}
*nthreads_curr = max2(*nthreads_curr, 1);
}
*ptr_nthreads = nthreads_set;
errinfo = 0;
R_tryEvalSilent(VECTOR_ELT(*ptr_glob_lst, 4),
VECTOR_ELT(*ptr_glob_lst, 0),
&errinfo);
#elif defined(_FOR_PYTHON) && !defined(IS_PY_TEST)
if (nthreads_curr != NULL) {
*nthreads_curr = py_get_threads();
}
py_set_threads(nthreads_set);
#if defined(HAS_OPENBLAS)
openblas_set_num_threads(nthreads_set);
#endif
#elif defined(HAS_OPENBLAS)
if (nthreads_curr != NULL) {
*nthreads_curr = openblas_get_num_threads();
*nthreads_curr = max2(*nthreads_curr, 1);
}
openblas_set_num_threads(nthreads_set);
#elif defined(_OPENMP) && !defined(MKL_H) && !defined(HAS_MKL)
if (nthreads_curr != NULL) {
*nthreads_curr = omp_get_num_threads();
*nthreads_curr = max2(*nthreads_curr, 1);
}
omp_set_num_threads(nthreads_set);
#endif
}
#if defined(_FOR_R) && defined(WRAPPED_GELSD) && !defined(USE_FLOAT)
/* Adapter so LAPACK's GELSD can be invoked through R's protected-call
   mechanism: unpacks the argument struct and calls tgelsd_.
   Always returns R_NilValue. */
SEXP wrapper_GELSD(void *data)
{
    Args_to_GELSD *data_ = (Args_to_GELSD*)data;
    tgelsd_(data_->m, data_->n, data_->nrhs,
            data_->A, data_->lda, data_->B, data_->ldb,
            data_->S, data_->rcond, data_->rank,
            data_->work, data_->lwork, data_->iwork,
            data_->info);
    return R_NilValue;
}
/* Cleanup callback paired with wrapper_GELSD: on a non-local jump
   (R error or interrupt), frees the registered pointers and clears
   GELSD_free_inputs (NOTE(review): presumably so the normal path does
   not free them again -- confirm against callers). */
void clean_after_GELSD(void *cdata, Rboolean jump)
{
    if (jump)
    {
        PointersToFree *cdata_ = (PointersToFree*)cdata;
        for (size_t ix = 0; ix < cdata_->n_pointers; ix++)
            free(cdata_->pointers[ix]);
        GELSD_free_inputs = false;
    }
}
#endif
/* Report whether this translation unit was compiled with OpenMP. */
bool get_has_openmp(void)
{
#ifdef _OPENMP
    const bool compiled_with_omp = true;
#else
    const bool compiled_with_omp = false;
#endif
    return compiled_with_omp;
}
|
fields_modifiers.c | // RUN: %libomp-compile-and-run
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#define XSTR(x) #x
#define STR(x) XSTR(x)
#define streqls(s1, s2) (!strcmp(s1, s2))
#define check(condition) \
if (!(condition)) { \
fprintf(stderr, "error: %s: %d: " STR(condition) "\n", __FILE__, \
__LINE__); \
exit(1); \
}
#define BUFFER_SIZE 1024
char buf[BUFFER_SIZE];
#pragma omp threadprivate(buf)
/* Capture the calling thread's affinity string into the threadprivate
   buffer 'buf' via omp_capture_affinity and return it.  Always checks
   the string fits in the buffer; when check_needed != 0, also checks
   the captured length is exactly 'check_needed' (check() exits on
   failure). */
char* get_string(size_t check_needed) {
  size_t needed = omp_capture_affinity(buf, BUFFER_SIZE, NULL);
  //printf("buf = %s\n", buf);
  check(needed < BUFFER_SIZE);
  if (check_needed != 0) {
    check(needed == check_needed);
  }
  return buf;
}
/* Affinity formats "%0.8{thread_num}" / "%0.8n": expect the thread
   number right-justified in a field of 8, zero-padded on the left. */
void check_thread_num_padded_rjustified() {
  const char* formats[2] = {"%0.8{thread_num}", "%0.8n"};
  for (size_t f = 0; f < sizeof(formats) / sizeof(formats[0]); ++f) {
    omp_set_affinity_format(formats[f]);
#pragma omp parallel num_threads(8)
    {
      char expected = (char)('0' + (char)omp_get_thread_num());
      char* s = get_string(8);
      int j = 0;
      while (j < 7) {
        check(s[j] == '0');
        ++j;
      }
      check(s[j] == expected);
    }
  }
}
/* Affinity formats "%.12{thread_num}" / "%.12n": expect the thread
   number right-justified in a field of 12, space-padded on the left. */
void check_thread_num_rjustified() {
  const char* formats[2] = {"%.12{thread_num}", "%.12n"};
  for (size_t f = 0; f < sizeof(formats) / sizeof(formats[0]); ++f) {
    omp_set_affinity_format(formats[f]);
#pragma omp parallel num_threads(8)
    {
      char expected = (char)('0' + (char)omp_get_thread_num());
      char* s = get_string(12);
      int j;
      for (j = 0; j < 11; ++j) {
        check(s[j] == ' ');
      }
      check(s[j] == expected);
    }
  }
}
/* Affinity formats "%5{thread_num}" / "%5n": expect the thread number
   first, followed by space padding out to a field of 5. */
void check_thread_num_ljustified() {
  const char* formats[2] = {"%5{thread_num}", "%5n"};
  for (size_t f = 0; f < sizeof(formats) / sizeof(formats[0]); ++f) {
    omp_set_affinity_format(formats[f]);
#pragma omp parallel num_threads(8)
    {
      char expected = (char)('0' + (char)omp_get_thread_num());
      char* s = get_string(5);
      check(s[0] == expected);
      for (int j = 1; j < 5; ++j) {
        check(s[j] == ' ');
      }
    }
  }
}
/* Affinity formats "%018{thread_num}" / "%018n": expect the thread
   number first, followed by space padding out to a field of 18. */
void check_thread_num_padded_ljustified() {
  const char* formats[2] = {"%018{thread_num}", "%018n"};
  for (size_t f = 0; f < sizeof(formats) / sizeof(formats[0]); ++f) {
    omp_set_affinity_format(formats[f]);
#pragma omp parallel num_threads(8)
    {
      char expected = (char)('0' + (char)omp_get_thread_num());
      char* s = get_string(18);
      check(s[0] == expected);
      for (int j = 1; j < 18; ++j) {
        check(s[j] == ' ');
      }
    }
  }
}
/* Driver: run every thread_num justification/padding check.  A failed
   check() prints the failing condition and exits with status 1, so
   reaching the final return means all formats behaved as expected. */
int main(int argc, char** argv) {
  check_thread_num_ljustified();
  check_thread_num_rjustified();
  check_thread_num_padded_ljustified();
  check_thread_num_padded_rjustified();
  return 0;
}
|
matrix.c | #include <omp.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <sys/time.h>
#include <math.h>
#define MATRIX_1 "matrix1.txt"
#define MATRIX_2 "matrix2.txt"
#define PRODUCT "product.txt"
#define MATRIX_SIZE 16
/* Block matrix multiplication benchmark: reads two 16x16 matrices and
   their expected product from text files, multiplies them block-by-
   block with OpenMP, times each trial, and verifies the result.
   argv[1] (optional): block size; must be > 0 and divide MATRIX_SIZE
   argv[2] (optional): number of trials; must be > 0
   Fixes vs. original: fopen/fscanf results are now checked (a missing
   or short file previously caused a NULL dereference or garbage data);
   block_size == 0 previously caused division by zero in the '%' test
   and a zero-step infinite loop; trials <= 0 previously created a
   zero-length VLA and divided by zero when averaging; the per-element
   '#pragma omp critical' is removed -- each (x, y) iteration writes a
   distinct output element, so the critical section only serialized the
   whole kernel. */
int main(int argc, char *argv[])
{
    puts("\e[0;34m==>\e[0m Reading in values from the matrix files...");
    double matrix1[MATRIX_SIZE][MATRIX_SIZE],
        matrix2[MATRIX_SIZE][MATRIX_SIZE],
        real_product[MATRIX_SIZE][MATRIX_SIZE],
        calc_product[MATRIX_SIZE][MATRIX_SIZE];
    {
        FILE *matrix1_file = fopen(MATRIX_1, "r"),
            *matrix2_file = fopen(MATRIX_2, "r"),
            *product_file = fopen(PRODUCT, "r");
        if (!matrix1_file || !matrix2_file || !product_file)
        {
            fprintf(stderr, "Could not open one of the matrix files\n");
            exit(1);
        }
        for (int i = 0; i < MATRIX_SIZE; ++i)
        {
            for (int j = 0; j < MATRIX_SIZE; ++j)
            {
                if (fscanf(matrix1_file, "%lf", &matrix1[i][j]) != 1
                    || fscanf(matrix2_file, "%lf", &matrix2[i][j]) != 1
                    || fscanf(product_file, "%lf", &real_product[i][j]) != 1)
                {
                    fprintf(stderr, "Malformed matrix file\n");
                    exit(1);
                }
            }
        }
        fclose(matrix1_file);
        fclose(matrix2_file);
        fclose(product_file);
    }
    int block_size = 4;
    if (argc > 1)
    {
        block_size = atoi(argv[1]);
        /* reject 0/negative before the modulo (division by zero) and
           before using it as a loop step */
        if (block_size <= 0 || MATRIX_SIZE % block_size != 0)
        {
            printf("\e[0;31m==> %d Block size is not divisible into the matrix\n", block_size);
            exit(1);
        }
    }
    int trials = 1;
    if (argc > 2)
    {
        trials = atoi(argv[2]);
        if (trials <= 0)
        {
            fprintf(stderr, "Number of trials must be positive\n");
            exit(1);
        }
    }
    long long execution_times[trials];
    printf("\e[0;34m==>\e[0m Running %d block matrix multiplcation trials...\n", trials);
    for (int trial = 0; trial < trials; ++trial)
    {
        // Empty out the array
        memset(calc_product, 0, sizeof(calc_product));
        struct timeval time_start;
        struct timeval time_end;
        gettimeofday(&time_start, NULL);
        // Do the calculations
        for (int i = 0; i < MATRIX_SIZE; i += block_size)
        {
            for (int j = 0; j < MATRIX_SIZE; j += block_size)
            {
                #pragma omp parallel for collapse(2)
                for (int x = 0; x < block_size; ++x)
                {
                    for (int y = 0; y < block_size; ++y)
                    {
                        /* each (x, y) owns calc_product[i+x][j+y], so no
                           synchronization is needed for the inner product */
                        double sum = 0.0;
                        for (int k = 0; k < MATRIX_SIZE; ++k)
                            sum += matrix1[i + x][k] * matrix2[k][j + y];
                        calc_product[i + x][j + y] += sum;
                    }
                }
            }
        }
        gettimeofday(&time_end, NULL);
        // Elapsed wall-clock time in microseconds
        execution_times[trial] = 1000000LL
            * (time_end.tv_sec - time_start.tv_sec)
            + (time_end.tv_usec - time_start.tv_usec);
        // Check that our calculated value matches the real product
        for (int i = 0; i < MATRIX_SIZE; ++i)
        {
            for (int j = 0; j < MATRIX_SIZE; ++j)
            {
                if (fabs(calc_product[i][j] - real_product[i][j]) > 0.001)
                {
                    printf("%dx%d: %lf expected. Was %lf\n", i, j, real_product[i][j], calc_product[i][j]);
                    puts("\e[0;31m==> Calculated product differs from real product!");
                    exit(1);
                }
            }
        }
    }
    /* summarize the minimum and average trial times */
    long long min = INT_MAX,
        avg = 0;
    for (int i = 0; i < trials; ++i)
    {
        if (execution_times[i] < min)
        {
            min = execution_times[i];
        }
        avg += execution_times[i];
    }
    printf("avg: %lld\n", avg / trials);
    printf("min: %lld\n", min);
    puts("\e[0;32m==>\e[0m All calculated product matches real product");
    return 0;
}
|
GB_unaryop__abs_uint8_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint8_fp32
// op(A') function: GB_tran__abs_uint8_fp32
// C type: uint8_t
// A type: float
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = op (cast (Ax [p])) for p = 0..anz-1: apply the ABS unary
// operator with a float -> uint8_t typecast.  Per the header comments
// above, the cast (GB_CAST_UNSIGNED) does the conversion and the op
// itself is the identity (cij = aij).
GrB_Info GB_unop__abs_uint8_fp32
(
    uint8_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent; a static schedule splits them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary op.
// The actual loop lives in the shared template GB_unaryop_transpose.c,
// specialized through the GB_* macros defined earlier in this file.
GrB_Info GB_tran__abs_uint8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_sgemm_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack16_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 64u, 16, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 64u, 16, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 64u, 16, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 64u, 16, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 64u, 16, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x12
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
__m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
__m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
__m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);
__m512 _r8 = _mm512_loadu_ps(img0 + 16 * 8);
__m512 _r9 = _mm512_loadu_ps(img0 + 16 * 9);
__m512 _ra = _mm512_loadu_ps(img0 + 16 * 10);
__m512 _rb = _mm512_loadu_ps(img0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
_mm512_storeu_ps(tmpptr + 16 * 4, _r4);
_mm512_storeu_ps(tmpptr + 16 * 5, _r5);
_mm512_storeu_ps(tmpptr + 16 * 6, _r6);
_mm512_storeu_ps(tmpptr + 16 * 7, _r7);
_mm512_storeu_ps(tmpptr + 16 * 8, _r8);
_mm512_storeu_ps(tmpptr + 16 * 9, _r9);
_mm512_storeu_ps(tmpptr + 16 * 10, _ra);
_mm512_storeu_ps(tmpptr + 16 * 11, _rb);
img0 += size * 16;
tmpptr += 16 * 12;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x8
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4);
__m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5);
__m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6);
__m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
_mm512_storeu_ps(tmpptr + 16 * 4, _r4);
_mm512_storeu_ps(tmpptr + 16 * 5, _r5);
_mm512_storeu_ps(tmpptr + 16 * 6, _r6);
_mm512_storeu_ps(tmpptr + 16 * 7, _r7);
img0 += size * 16;
tmpptr += 16 * 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x4
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2);
__m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
_mm512_storeu_ps(tmpptr + 16 * 2, _r2);
_mm512_storeu_ps(tmpptr + 16 * 3, _r3);
img0 += size * 16;
tmpptr += 16 * 4;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
// transpose 16x2
__m512 _r0 = _mm512_loadu_ps(img0);
__m512 _r1 = _mm512_loadu_ps(img0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_storeu_ps(tmpptr, _r0);
_mm512_storeu_ps(tmpptr + 16, _r1);
img0 += size * 16;
tmpptr += 16 * 2;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16;
for (int k = 0; k < maxk; k++)
{
__m512 _val = _mm512_loadu_ps(img0);
_mm512_storeu_ps(tmpptr, _val);
img0 += size * 16;
tmpptr += 16;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 16 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
__m512 _sum4 = _sum0;
__m512 _sum5 = _sum0;
__m512 _sum6 = _sum0;
__m512 _sum7 = _sum0;
__m512 _sum8 = _sum0;
__m512 _sum9 = _sum0;
__m512 _suma = _sum0;
__m512 _sumb = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(tmpptr[4]);
__m512 _val5 = _mm512_set1_ps(tmpptr[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(tmpptr[6]);
__m512 _val7 = _mm512_set1_ps(tmpptr[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(tmpptr[8]);
__m512 _val9 = _mm512_set1_ps(tmpptr[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(tmpptr[10]);
__m512 _valb = _mm512_set1_ps(tmpptr[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
tmpptr += 12;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
_mm512_storeu_ps(outptr0 + 16 * 2, _sum2);
_mm512_storeu_ps(outptr0 + 16 * 3, _sum3);
_mm512_storeu_ps(outptr0 + 16 * 4, _sum4);
_mm512_storeu_ps(outptr0 + 16 * 5, _sum5);
_mm512_storeu_ps(outptr0 + 16 * 6, _sum6);
_mm512_storeu_ps(outptr0 + 16 * 7, _sum7);
_mm512_storeu_ps(outptr0 + 16 * 8, _sum8);
_mm512_storeu_ps(outptr0 + 16 * 9, _sum9);
_mm512_storeu_ps(outptr0 + 16 * 10, _suma);
_mm512_storeu_ps(outptr0 + 16 * 11, _sumb);
outptr0 += 16 * 12;
}
for (; i + 7 < size; i += 8)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
__m512 _sum4 = _sum0;
__m512 _sum5 = _sum0;
__m512 _sum6 = _sum0;
__m512 _sum7 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(tmpptr[4]);
__m512 _val5 = _mm512_set1_ps(tmpptr[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(tmpptr[6]);
__m512 _val7 = _mm512_set1_ps(tmpptr[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
tmpptr += 8;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
_mm512_storeu_ps(outptr0 + 16 * 2, _sum2);
_mm512_storeu_ps(outptr0 + 16 * 3, _sum3);
_mm512_storeu_ps(outptr0 + 16 * 4, _sum4);
_mm512_storeu_ps(outptr0 + 16 * 5, _sum5);
_mm512_storeu_ps(outptr0 + 16 * 6, _sum6);
_mm512_storeu_ps(outptr0 + 16 * 7, _sum7);
outptr0 += 16 * 8;
}
for (; i + 3 < size; i += 4)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
__m512 _sum2 = _sum0;
__m512 _sum3 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(tmpptr[2]);
__m512 _val3 = _mm512_set1_ps(tmpptr[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
tmpptr += 4;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
_mm512_storeu_ps(outptr0 + 16 * 2, _sum2);
_mm512_storeu_ps(outptr0 + 16 * 3, _sum3);
outptr0 += 16 * 4;
}
for (; i + 1 < size; i += 2)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum0 = _mm512_loadu_ps(biasptr);
__m512 _sum1 = _sum0;
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
__m512 _val1 = _mm512_set1_ps(tmpptr[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
tmpptr += 2;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum0);
_mm512_storeu_ps(outptr0 + 16, _sum1);
outptr0 += 16 * 2;
}
for (; i < size; i++)
{
const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr0 = kernel.channel(p);
int nn = inch * maxk * 16; // inch always > 0
__m512 _sum = _mm512_loadu_ps(biasptr);
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_loadu_ps(kptr0);
__m512 _val0 = _mm512_set1_ps(tmpptr[0]);
_sum = _mm512_fmadd_ps(_val0, _w0, _sum);
tmpptr += 1;
kptr0 += 16;
}
_mm512_storeu_ps(outptr0, _sum);
outptr0 += 16;
}
}
}
// Repack convolution weights for the pack16 AVX-512 SGEMM kernel.
// src layout = maxk-inch-outch
// dst layout = 16b-16a-maxk-inch/16a-outch/16b
static void convolution_im2col_sgemm_transform_kernel_pack16_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(16 * 16 * maxk, inch / 16, outch / 16, (size_t)4u);

    for (int oc = 0; oc + 15 < outch; oc += 16)
    {
        float* dst = kernel_tm.channel(oc / 16);

        for (int ic = 0; ic + 15 < inch; ic += 16)
        {
            for (int k = 0; k < maxk; k++)
            {
                // interleave one 16x16 tile: input channel major (a),
                // output channel minor (b), one tap k at a time
                for (int a = 0; a < 16; a++)
                {
                    for (int b = 0; b < 16; b++)
                    {
                        const float* src = kernel.channel(oc + b).row(ic + a);
                        *dst++ = src[k];
                    }
                }
            }
        }
    }
}
// im2col + packed SGEMM convolution for pack16 (AVX-512) data layout.
// bottom_blob: packed-16 input; top_blob: preallocated output;
// kernel: weights pre-transformed by
// convolution_im2col_sgemm_transform_kernel_pack16_avx512.
static void convolution_im2col_sgemm_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh; // number of output spatial positions
    const int maxk = kernel_w * kernel_h;
    // im2col: for every kernel tap, copy the strided input window into a
    // contiguous row, producing a (size x maxk x inch) matrix of 16-float packs
    Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator);
    {
        // gap: packed floats to skip from the end of one output row to the
        // start of the next input row
        const int gap = (w * stride_h - outw * stride_w) * 16;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // source start for this kernel tap (u, v)
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 16;
                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            // copy one 16-float pack per output position
                            // (aligned load/store: Mat rows are 64-byte aligned here)
                            __m512 _v = _mm512_load_ps(sptr);
                            _mm512_store_ps(ptr, _v);
                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }
    im2col_sgemm_pack16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
pr34694.c | /* PR middle-end/34694 */
/* { dg-do compile } */
/* { dg-options "-O -fopenmp -Wall" } */
int i;
/* The parallel region reads the uninitialized local 'j'; the dg
   directives below verify that the uninitialized-use warning and its
   accompanying note are emitted on these exact lines.  */
void
foo ()
{
#pragma omp parallel
  {
    int j; /* { dg-message "note: 'j' was declared here" } */
    i = j; /* { dg-warning "is used uninitialized" } */
  }
}
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987-2013 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-family/c-common.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a type is defined inside a struct or union type.
This is used for -Wc++-compat. */
#define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (!prototype_p (TREE_TYPE (EXP)) \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For a CONSTRUCTOR, whether some initializer contains a
subexpression meaning it is not a constant expression. */
#define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (EXPR))
/* Parser-side information about an expression, carried alongside the
   tree representing its value; this extra information is irrelevant
   for code generation.  */
struct c_expr
{
  /* The value of the expression.  */
  tree value;
  /* Record the original unary/binary operator of an expression, which may
     have been changed by fold, STRING_CST for unparenthesized string
     constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls
     (even if parenthesized), for subexpressions, and for non-constant
     initializers, or ERROR_MARK for other expressions (including
     parenthesized expressions).  */
  enum tree_code original_code;
  /* If not NULL, the original type of an expression.  This will
     differ from the type of the value field for an enum constant.
     The type of an enum constant is a plain integer type, but this
     field will be the enum type.  */
  tree original_type;
};
/* Type alias for struct c_expr. This allows to use the structure
inside the VEC types. */
typedef struct c_expr c_expr_t;
/* A varray of c_expr_t. */
/* Append a new c_expr_t element to V. */
#define C_EXPR_APPEND(V, ELEM) \
do { \
c_expr_t __elem = (ELEM); \
vec_safe_push (V, __elem); \
} while (0)
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* No typespec. This appears only in struct c_declspec. */
ctsk_none,
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier. */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
   passed to declspecs_add_type only.  */
struct c_typespec {
  /* What kind of type specifier this is (see enum c_typespec_kind).  */
  enum c_typespec_kind kind;
  /* Whether the expression has operands suitable for use in constant
     expressions.  */
  bool expr_const_operands;
  /* The specifier itself (a tree node).  */
  tree spec;
  /* An expression to be evaluated before the type specifier, in the
     case of typeof specifiers, or NULL otherwise or if no such
     expression is required for a particular typeof specifier.  In
     particular, when typeof is applied to an expression of variably
     modified type, that expression must be evaluated in order to
     determine array sizes that form part of the type, but the
     expression itself (as opposed to the array sizes) forms no part
     of the type and so needs to be recorded separately.  */
  tree expr;
};
/* A storage class specifier, or csc_none if no storage class
   specifier was seen.  */
enum c_storage_class {
  csc_none,
  csc_auto,
  csc_extern,
  csc_register,
  csc_static,
  csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "__int128",
   "float", "double", "_Decimal32", "_Decimal64", "_Decimal128",
   "_Fract", "_Accum", or none of these.  */
enum c_typespec_keyword {
  cts_none,
  cts_void,
  cts_bool,
  cts_char,
  cts_int,
  cts_float,
  cts_int128,
  cts_double,
  cts_dfloat32,
  cts_dfloat64,
  cts_dfloat128,
  cts_fract,
  cts_accum
};
/* This enum lists all the possible declarator specifiers, storage
   class or attribute that a user can write.  There is at least one
   enumerator per possible declarator specifier in the struct
   c_declspecs below.
   It is used to index the array of declspec locations in struct
   c_declspecs.  */
enum c_declspec_word {
  cdw_typespec /* A catch-all for a typespec.  */,
  cdw_storage_class /* A catch-all for a storage class */,
  cdw_attributes,
  cdw_typedef,
  cdw_explicit_signed,
  cdw_deprecated,
  cdw_default_int,
  cdw_long,
  cdw_long_long,
  cdw_short,
  cdw_signed,
  cdw_unsigned,
  cdw_complex,
  cdw_inline,
  cdw_noreturn,
  cdw_thread,
  cdw_const,
  cdw_volatile,
  cdw_restrict,
  cdw_saturating,
  cdw_alignas,
  cdw_address_space,
  cdw_number_of_elements /* This one must always be the last
			    enumerator, as it gives the size of the
			    locations array.  */
};
/* A sequence of declaration specifiers in C.  When a new declaration
   specifier is added, please update the enum c_declspec_word above
   accordingly.  */
struct c_declspecs {
  /* Source location of each kind of declaration specifier seen,
     indexed by enum c_declspec_word.  */
  source_location locations[cdw_number_of_elements];
  /* The type specified, if a single type specifier such as a struct,
     union or enum specifier, typedef name or typeof specifies the
     whole type, or NULL_TREE if none or a keyword such as "void" or
     "char" is used.  Does not include qualifiers.  */
  tree type;
  /* Any expression to be evaluated before the type, from a typeof
     specifier.  */
  tree expr;
  /* The attributes from a typedef decl.  */
  tree decl_attr;
  /* When parsing, the attributes.  Outside the parser, this will be
     NULL; attributes (possibly from multiple lists) will be passed
     separately.  */
  tree attrs;
  /* The base-2 log of the greatest alignment required by an _Alignas
     specifier, in bytes, or -1 if no such specifiers with nonzero
     alignment.  */
  int align_log;
  /* The storage class specifier, or csc_none if none.  */
  enum c_storage_class storage_class;
  /* Any type specifier keyword used such as "int", not reflecting
     modifiers such as "short", or cts_none if none.  */
  ENUM_BITFIELD (c_typespec_keyword) typespec_word : 8;
  /* The kind of type specifier if one has been seen, ctsk_none
     otherwise.  */
  ENUM_BITFIELD (c_typespec_kind) typespec_kind : 3;
  /* Whether any expressions in typeof specifiers may appear in
     constant expressions.  */
  BOOL_BITFIELD expr_const_operands : 1;
  /* Whether any declaration specifiers have been seen at all.  */
  BOOL_BITFIELD declspecs_seen_p : 1;
  /* Whether something other than a storage class specifier or
     attribute has been seen.  This is used to warn for the
     obsolescent usage of storage class specifiers other than at the
     start of the list.  (Doing this properly would require function
     specifiers to be handled separately from storage class
     specifiers.)  */
  BOOL_BITFIELD non_sc_seen_p : 1;
  /* Whether the type is specified by a typedef or typeof name.  */
  BOOL_BITFIELD typedef_p : 1;
  /* Whether the type is explicitly "signed" or specified by a typedef
     whose type is explicitly "signed".  */
  BOOL_BITFIELD explicit_signed_p : 1;
  /* Whether the specifiers include a deprecated typedef.  */
  BOOL_BITFIELD deprecated_p : 1;
  /* Whether the type defaulted to "int" because there were no type
     specifiers.  */
  BOOL_BITFIELD default_int_p : 1;
  /* Whether "long" was specified.  */
  BOOL_BITFIELD long_p : 1;
  /* Whether "long" was specified more than once.  */
  BOOL_BITFIELD long_long_p : 1;
  /* Whether "short" was specified.  */
  BOOL_BITFIELD short_p : 1;
  /* Whether "signed" was specified.  */
  BOOL_BITFIELD signed_p : 1;
  /* Whether "unsigned" was specified.  */
  BOOL_BITFIELD unsigned_p : 1;
  /* Whether "complex" was specified.  */
  BOOL_BITFIELD complex_p : 1;
  /* Whether "inline" was specified.  */
  BOOL_BITFIELD inline_p : 1;
  /* Whether "_Noreturn" was specified.  */
  BOOL_BITFIELD noreturn_p : 1;
  /* Whether "__thread" was specified.  */
  BOOL_BITFIELD thread_p : 1;
  /* Whether "const" was specified.  */
  BOOL_BITFIELD const_p : 1;
  /* Whether "volatile" was specified.  */
  BOOL_BITFIELD volatile_p : 1;
  /* Whether "restrict" was specified.  */
  BOOL_BITFIELD restrict_p : 1;
  /* Whether "_Sat" was specified.  */
  BOOL_BITFIELD saturating_p : 1;
  /* Whether any alignment specifier (even with zero alignment) was
     specified.  */
  BOOL_BITFIELD alignas_p : 1;
  /* The address space that the declaration belongs to.  */
  addr_space_t address_space;
};
/* The various kinds of declarators in C.  */
enum c_declarator_kind {
  /* An identifier.  */
  cdk_id,
  /* A function.  */
  cdk_function,
  /* An array.  */
  cdk_array,
  /* A pointer.  */
  cdk_pointer,
  /* Parenthesized declarator with nested attributes.  */
  cdk_attrs
};
/* A tag defined within a function's parameter list; collected in
   c_arg_info::tags.  NOTE(review): despite the member comments saying
   "argument", these appear to describe the tag itself — confirm.  */
typedef struct c_arg_tag_d {
  /* The argument name.  */
  tree id;
  /* The type of the argument.  */
  tree type;
} c_arg_tag;
/* Information about the parameters in a function declarator.  */
struct c_arg_info {
  /* A list of parameter decls.  */
  tree parms;
  /* A list of structure, union and enum tags defined.  */
  vec<c_arg_tag, va_gc> *tags;
  /* A list of argument types to go in the FUNCTION_TYPE.  */
  tree types;
  /* A list of non-parameter decls (notably enumeration constants)
     defined with the parameters.  */
  tree others;
  /* A compound expression of VLA sizes from the parameters, or NULL.
     In a function definition, these are used to ensure that
     side-effects in sizes of arrays converted to pointers (such as a
     parameter int i[n++]) take place; otherwise, they are
     ignored.  */
  tree pending_sizes;
  /* True when these arguments had [*] (unspecified VLA bound).  */
  BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator: a linked chain of declarator nodes ending in a
   cdk_id node.  Which member of the union `u' is valid depends on
   `kind'.  */
struct c_declarator {
  /* The kind of declarator.  */
  enum c_declarator_kind kind;
  location_t id_loc; /* Currently only set for cdk_id, cdk_array. */
  /* Except for cdk_id, the contained declarator.  For cdk_id, NULL.  */
  struct c_declarator *declarator;
  union {
    /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
       declarator.  */
    tree id;
    /* For functions.  */
    struct c_arg_info *arg_info;
    /* For arrays.  */
    struct {
      /* The array dimension, or NULL for [] and [*].  */
      tree dimen;
      /* The qualifiers inside [].  */
      int quals;
      /* The attributes (currently ignored) inside [].  */
      tree attrs;
      /* Whether [static] was used.  */
      BOOL_BITFIELD static_p : 1;
      /* Whether [*] was used.  */
      BOOL_BITFIELD vla_unspec_p : 1;
    } array;
    /* For pointers, the qualifiers on the pointer type.  */
    int pointer_quals;
    /* For attributes.  */
    tree attrs;
  } u;
};
/* A type name, as used e.g. in casts and sizeof: declaration
   specifiers plus an (abstract) declarator.  */
struct c_type_name {
  /* The declaration specifiers.  */
  struct c_declspecs *specs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* A single function parameter as parsed.  */
struct c_parm {
  /* The declaration specifiers, minus any prefix attributes.  */
  struct c_declspecs *specs;
  /* The prefix attributes.  */
  tree attrs;
  /* The declarator.  */
  struct c_declarator *declarator;
};
/* State carried while parsing an enum definition.  Initialized by
   start_enum.  */
struct c_enum_contents
{
  /* While defining an enum type, this is 1 plus the last enumerator
     constant value.  */
  tree enum_next_value;
  /* Nonzero means that there was overflow computing enum_next_value.  */
  int enum_overflow;
};
/* A type of reference to a static identifier in an inline
   function; see record_inline_static.  */
enum c_inline_static_type {
  /* Identifier with internal linkage used in function that may be an
     inline definition (i.e., file-scope static).  */
  csi_internal,
  /* Modifiable object with static storage duration defined in
     function that may be an inline definition (i.e., local
     static).  */
  csi_modifiable
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
struct c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern bool global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void c_bindings_start_stmt_expr (struct c_spot_bindings *);
extern void c_bindings_end_stmt_expr (struct c_spot_bindings *);
extern void record_inline_static (location_t, tree, tree,
enum c_inline_static_type);
extern void c_init_decl_processing (void);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (location_t, tree,
struct c_declspecs *,
bool, bool);
extern tree build_enumerator (location_t, location_t, struct c_enum_contents *,
tree, tree);
extern tree check_for_loop_decls (location_t, bool);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (location_t, tree);
extern tree lookup_label_for_goto (location_t, tree);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern struct c_spot_bindings *c_get_switch_bindings (void);
extern void c_release_switch_bindings (struct c_spot_bindings *);
extern bool c_check_switch_jump_warnings (struct c_spot_bindings *,
location_t, location_t);
extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
struct c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
struct c_declspecs *, tree, tree *);
extern tree groktypename (struct c_type_name *, tree *, bool *);
extern tree grokparm (const struct c_parm *, tree *);
extern tree implicitly_declare (location_t, tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (void);
extern void c_pop_function_context (void);
extern void push_parm_decl (const struct c_parm *, tree *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *);
extern tree c_builtin_function (tree);
extern tree c_builtin_function_ext_scope (tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (location_t, struct c_enum_contents *, tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
struct c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (location_t, enum tree_code, tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (location_t,
struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_addrspace (source_location,
struct c_declspecs *,
addr_space_t);
extern struct c_declspecs *declspecs_add_alignas (source_location,
struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern bool c_warn_unused_global_decl (const_tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern tree c_last_sizeof_arg;
extern struct c_switch *c_switch_stack;
extern tree c_objc_common_truthvalue_conversion (location_t, tree);
extern tree require_complete_type (tree);
extern int same_translation_unit_p (const_tree, const_tree);
extern int comptypes (tree, tree);
extern int comptypes_check_different_types (tree, tree, bool *);
extern bool c_vla_type_p (const_tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (const_tree, const_tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (location_t,
struct c_expr);
extern struct c_expr default_function_array_read_conversion (location_t,
struct c_expr);
extern void mark_exp_read (tree);
extern tree composite_type (tree, tree);
extern tree build_component_ref (location_t, tree, tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree build_external_ref (location_t, tree, int, tree *);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (location_t, struct c_expr);
extern struct c_expr c_expr_sizeof_type (location_t, struct c_type_name *);
extern struct c_expr parser_build_unary_op (location_t, enum tree_code,
struct c_expr);
extern struct c_expr parser_build_binary_op (location_t,
enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (location_t, tree, bool, tree, tree,
tree, tree);
extern tree build_compound_expr (location_t, tree, tree);
extern tree c_cast_expr (location_t, struct c_type_name *, tree);
extern tree build_c_cast (location_t, tree, tree);
extern void store_init_value (location_t, tree, tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (location_t, int opt, const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int, struct obstack *);
extern struct c_expr pop_init_level (int, struct obstack *);
extern void set_init_index (tree, tree, struct obstack *);
extern void set_init_label (tree, struct obstack *);
extern void process_init_element (struct c_expr, bool, struct obstack *);
extern tree build_compound_literal (location_t, tree, tree, bool);
extern void check_compound_literal_type (location_t, struct c_type_name *);
extern tree c_start_case (location_t, location_t, tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (location_t, tree, tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (location_t, tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, bool);
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (location_t, tree);
extern tree c_process_expr_stmt (location_t, tree);
extern tree c_finish_expr_stmt (location_t, tree);
extern tree c_finish_return (location_t, tree, tree);
extern tree c_finish_bc_stmt (location_t, tree *, bool);
extern tree c_finish_goto_label (location_t, tree);
extern tree c_finish_goto_ptr (location_t, tree);
extern tree c_expr_to_decl (tree, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (location_t, tree, tree);
extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
extern tree c_finish_transaction (location_t, tree, int);
extern tree c_build_function_call_vec (location_t, tree, vec<tree, va_gc> *,
vec<tree, va_gc> *);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Mode used to build pointers (VOIDmode means ptr_mode). */
extern enum machine_mode c_default_pointer_mode;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* In c-errors.c */
extern void pedwarn_c90 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern void pedwarn_c99 (location_t, int opt, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
#endif /* ! GCC_C_TREE_H */
|
omp_parallel_private.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdlib.h>
#include "omp_testsuite.h"
//static int sum1 = 789;
/* Verify that a variable listed in a private() clause gets a distinct,
 * independently-written copy in every thread of the parallel region.
 * Returns 1 on success, 0 on failure. */
int test_omp_parallel_private()
{
  int sum = 0;
  int num_threads = 0;
  int sum1;
  int known_sum;
  #pragma omp parallel private(sum1)
  {
    int i;
    /* Each thread seeds its private copy with 7. */
    sum1 = 7;
    #pragma omp for
    for (i = 1; i < 1000; i++)
      sum1 = sum1 + i;
    /* Serially combine the per-thread partial sums and count threads. */
    #pragma omp critical
    {
      sum = sum + sum1;
      num_threads++;
    }
  }
  /* 1 + 2 + ... + 999, plus the per-thread seed of 7. */
  known_sum = (999 * 1000) / 2 + 7 * num_threads;
  return (known_sum == sum);
}
/* Run the private-clause check REPETITIONS times; the exit status is the
 * number of failed repetitions (0 == all passed). */
int main()
{
  int rep;
  int failed = 0;
  for (rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_parallel_private())
      failed++;
  }
  return failed;
}
|
omp_set_wait_policy_overhead.c | // 2-process
// set wait policy, test2(0):passive / test2(1):active
// uncomment omp_quiesce() and omp_begin2() to check the time for quiesce policy
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>
#include <sys/timeb.h>
#include <sys/syscall.h>
#include <sys/types.h>
void *omp_parallel_foo(void *ptr);
/**Important: make sure you use num_threads clause in parallel direction and set it to the
* number of hardware cores, not the number of cores Linux gives or the default from OpenMP
*
* cat /proc/cpuinfo and check the processor id, core id and CPU model number so you can look up fron internet
* Lennon is Xeon CPU E5-2683 v3 @ 2.00GHz, it has two CPU for total 28 cores, but support upto 56 threads
* Paul is Xeon CPU E5-2695 v2 @ 2.40GHz, it has two CPU for total 24 cores, support upto 48 threads
* Fornax Intel® Xeon® E5-2699 v3 2.3GHz, it has two CPU for total 36 cores, support upto 72 threads.
*
* Use -O0 optimization
*/
int total_cores = 2;
void busy_waiting4(int time);
/* Wall-clock time in seconds, with millisecond resolution via ftime(). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return (double) now.time + (double) now.millitm / 1000.0;
}
/* Fork into two processes and time three rounds of nested OpenMP parallel
 * regions under a given wait policy.
 * type == 1 selects the "active" policy, anything else "passive"
 * (omp_set_wait_policy is a nonstandard runtime extension -- confirm the
 * linked OpenMP runtime provides it).
 * Only the child (proc_id == 0) measures and prints the elapsed time.
 * NOTE(review): the fork() result is not checked; on failure (-1) the
 * caller silently behaves like the parent branch. */
void test2(int type){
pid_t proc_id;
int i = 0;
double tt;
proc_id = fork();
/* Both parent and child set the wait policy for their own runtime. */
if(type==1){
omp_set_wait_policy(1); // active
}else{
omp_set_wait_policy(0); // passive
}
if(proc_id==0){
tt = read_timer();
}
/* Allow the inner "#pragma omp parallel" below to actually spawn threads. */
omp_set_nested(1);
for (i=0; i<3; i++){
/* Alternate the two branches between the processes each round, so one
 * process runs the nested region while the other spins. */
if ((proc_id+i)%2==0){
printf("%d pid %d\n", proc_id, getpid());
busy_waiting4(1);
#pragma omp parallel num_threads(2)
{
int tid = omp_get_thread_num();
// printf("level 2 id %d\n", tid);
busy_waiting4(1);
int pid = getpid();
int thid = syscall(SYS_gettid);
printf("level 2 ompid:%d pid:%d tid:%d\n", tid, pid, thid);
}
// omp_quiesce();
// omp_begin2();
}else{
printf("%d pid %d\n", proc_id, getpid());
int tid = omp_get_thread_num();
// printf("level 1 id %d\n", tid);
#pragma omp parallel num_threads(2)
{
int tid = omp_get_thread_num();
busy_waiting4(1);
int pid = getpid();
int thid = syscall(SYS_gettid);
printf("level 2 ompid:%d pid:%d tid:%d\n", tid, pid, thid);
}
// omp_quiesce();
// omp_begin2();
busy_waiting4(1);
}
}
/* Child reports the total elapsed wall-clock time for its three rounds. */
if( proc_id == 0){
tt = read_timer()-tt;
if(type==0){
printf("passive: %f\n", tt);
}else{
printf("active: %f\n", tt);
}
}
// while(1);
}
/* Driver: time the nested-parallel workload under the active wait policy.
 * The passive-policy run is kept disabled, as in the original. */
int main(int argc, char * argv[])
{
    /* test2(0); */
    test2(1);
    return 0;
}
/* Busy-wait (spin) for approximately `second` seconds of consumed CPU
 * time, measured with clock(). second <= 0 returns immediately.
 * Requires <time.h> for clock()/clock_t/CLOCKS_PER_SEC, which the
 * original file never included. */
void busy_waiting4(int second) {
    clock_t start = clock();
    clock_t target = start + (clock_t) second * CLOCKS_PER_SEC;
    /* Compare elapsed ticks directly. The previous form divided each
     * sample by CLOCKS_PER_SEC with integer division, which truncated
     * to whole seconds: the loop exited when the tick counter crossed
     * the next one-second *boundary*, so a request for 1 second could
     * return after an arbitrarily short wait. */
    while (clock() < target)
        ;
    // printf("It took %ld ticks to wait one second.\n",ticks2-ticks1);
    // printf("This value should be the same as CLOCKS_PER_SEC which is %d.\n",CLOCKS_PER_SEC);
}
|
target-12.c | #include <omp.h>
#include <stdlib.h>
/* Smoke-test for the OpenMP device-memory API: omp_target_alloc/free,
 * omp_target_associate_ptr/disassociate_ptr, omp_target_is_present,
 * omp_target_memcpy, and omp_target_memcpy_rect (1-D and 3-D).
 * Falls back to the initial (host) device when the default device is
 * invalid, and silently passes when omp_target_alloc fails. */
int
main ()
{
int d = omp_get_default_device ();
int id = omp_get_initial_device ();
int err;
int q[128], i;
void *p;
/* Use the host as the target when the default device is out of range. */
if (d < 0 || d >= omp_get_num_devices ())
d = id;
for (i = 0; i < 128; i++)
q[i] = i;
/* Device buffer holds 130 ints; q is later associated at a one-int offset. */
p = omp_target_alloc (130 * sizeof (int), d);
if (p == NULL)
return 0;
/* With NULL volume/pointers, omp_target_memcpy_rect must report the
 * number of dimensions it supports, which must be at least 3. */
if (omp_target_memcpy_rect (NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL,
d, id) < 3
|| omp_target_memcpy_rect (NULL, NULL, 0, 0, NULL, NULL, NULL, NULL,
NULL, id, d) < 3
|| omp_target_memcpy_rect (NULL, NULL, 0, 0, NULL, NULL, NULL, NULL,
NULL, id, id) < 3)
abort ();
/* If association fails (e.g. runtime with shared host/device memory),
 * skip the mapped-pointer checks entirely. */
if (omp_target_associate_ptr (q, p, 128 * sizeof (int), sizeof (int), d) == 0)
{
size_t volume[3] = { 128, 0, 0 };
size_t dst_offsets[3] = { 0, 0, 0 };
size_t src_offsets[3] = { 1, 0, 0 };
size_t dst_dimensions[3] = { 128, 0, 0 };
size_t src_dimensions[3] = { 128, 0, 0 };
/* Re-associating the same pointer with an identical mapping must succeed. */
if (omp_target_associate_ptr (q, p, 128 * sizeof (int), sizeof (int), d) != 0)
abort ();
if (omp_target_is_present (q, d) != 1
|| omp_target_is_present (&q[32], d) != 1
|| omp_target_is_present (&q[127], d) != 1)
abort ();
/* Copy q into the device buffer at the one-int destination offset. */
if (omp_target_memcpy (p, q, 128 * sizeof (int), sizeof (int), 0,
d, id) != 0)
abort ();
#pragma omp target if (d >= 0) device (d >= 0 ? d : 0) map(alloc:q[0:32]) map(from:err)
{
int j;
err = 0;
for (j = 0; j < 128; j++)
if (q[j] != j)
err = 1;
else
q[j] += 4;
}
if (err)
abort ();
/* 1-D rectangular copy back to the host, reading from offset 1. */
if (omp_target_memcpy_rect (q, p, sizeof (int), 1, volume,
dst_offsets, src_offsets, dst_dimensions,
src_dimensions, id, d) != 0)
abort ();
for (i = 0; i < 128; i++)
if (q[i] != i + 4)
abort ();
/* Now a genuinely 3-D strided copy: 6x3x2 sub-box of a 6x4x3 source
 * into a 6x3x2 destination, with per-dimension offsets. */
volume[2] = 2;
volume[1] = 3;
volume[0] = 6;
dst_offsets[2] = 1;
dst_offsets[1] = 0;
dst_offsets[0] = 0;
src_offsets[2] = 1;
src_offsets[1] = 0;
src_offsets[0] = 3;
dst_dimensions[2] = 2;
dst_dimensions[1] = 3;
dst_dimensions[0] = 6;
src_dimensions[2] = 3;
src_dimensions[1] = 4;
src_dimensions[0] = 6;
if (omp_target_memcpy_rect (p, q, sizeof (int), 3, volume,
dst_offsets, src_offsets, dst_dimensions,
src_dimensions, d, id) != 0)
abort ();
#pragma omp target if (d >= 0) device (d >= 0 ? d : 0) map(alloc:q[0:32]) map(from:err)
{
int j, k, l;
err = 0;
/* Expected values follow from the source offsets/strides above. */
for (j = 0; j < 6; j++)
for (k = 0; k < 3; k++)
for (l = 0; l < 2; l++)
if (q[j * 6 + k * 2 + l] != 3 * 12 + 4 + 1 + l + k * 3 + j * 12)
err = 1;
}
if (err)
abort ();
/* Device-to-device copy within the same buffer (offsets 111 -> 51). */
if (omp_target_memcpy (p, p, 10 * sizeof (int), 51 * sizeof (int),
111 * sizeof (int), d, d) != 0)
abort ();
#pragma omp target if (d >= 0) device (d >= 0 ? d : 0) map(alloc:q[0:32]) map(from:err)
{
int j;
err = 0;
for (j = 0; j < 10; j++)
if (q[50 + j] != q[110 + j])
err = 1;
}
if (err)
abort ();
if (omp_target_disassociate_ptr (q, d) != 0)
abort ();
}
omp_target_free (p, d);
return 0;
}
|
GB_unop__identity_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_uint16)
// op(A') function: GB (_unop_tran__identity_int64_uint16)
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_uint16)
(
    int64_t *Cx,                // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full or sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // identity op: Cx [p] = (int64_t) Ax [p]
            Cx [p] = (int64_t) Ax [p] ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = (int64_t) Ax [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while casting uint16_t entries to int64_t (identity op).
// The transpose loop itself lives in the shared template
// GB_unop_transpose.c, specialized by the GB_* macros defined earlier
// in this file.
GrB_Info GB (_unop_tran__identity_int64_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
clang-282491-3.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#define N 100
/* Plain length+pointer pair describing an array of doubles. */
typedef struct myvec{
size_t len;    /* number of elements in data */
double *data;  /* heap buffer of len doubles */
} myvec_t;
void init(myvec_t *s);
/* Allocate an N-element vector, initialize it inside an OpenMP target
 * region (struct and payload are both mapped), and print the last
 * element to show the device-side writes came back. */
int main(){
    myvec_t s;
    s.data = (double *)calloc(N,sizeof(double));
    /* Fix: the calloc result was previously used unchecked; a failed
     * allocation would be dereferenced inside the target region. */
    if (s.data == NULL) {
        fprintf(stderr, "calloc of %d doubles failed\n", N);
        return EXIT_FAILURE;
    }
    s.len = N;
    #pragma omp target map(s, s.data[:s.len])
    init(&s);
    printf("s.data[%d]=%lf\n",N-1,s.data[N-1]);
    free(s.data);
    return 0;
}
/* Fill s->data[0..s->len-1] with the index value (0.0, 1.0, ...).
 * Executes on the offload device when called from the target region
 * in main. */
void init(myvec_t *s)
{
    /* Fix: the index was a plain int while s->len is size_t, causing a
     * signed/unsigned comparison (and potential overflow for very large
     * vectors); use size_t to match the bound's type. */
    for (size_t i = 0; i < s->len; i++) s->data[i] = (double) i;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values.
 * Y is used as scratch and may be modified by the normalization below
 * (this matches the classic glibc-manual formulation).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds so that x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_sec += borrow;
        y->tv_usec -= 1000000 * borrow;
    }
    /* Carry whole seconds the other way if the gap exceeds one second. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_sec -= carry;
        y->tv_usec += 1000000 * carry;
    }
    /* After normalization, the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for a tiled (PLUTO/CLooG-generated) order-4, 3-D
 * 25-point wave-equation stencil. Runs TESTS timed repetitions over a
 * double-buffered grid A[2][Nz][Ny][Nx] with per-point velocity roc2,
 * and reports the best time via PRINT_RESULTS. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx/Ny/Nz stay uninitialized when argc <= 3, and Nt when
 * argc <= 4 -- using them below is undefined behavior. Confirm the
 * benchmark harness always supplies all four arguments. */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* NOTE(review): allocation results are never checked throughout. */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
/* NOTE(review): the one-element roc2 allocation above leaks -- the
 * pointer is immediately overwritten here. */
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
/* Fixed seed keeps the input data reproducible across runs. */
srand(42);
/* NOTE(review): loops start at 1, so the i==0/j==0/k==0 planes are never
 * initialized -- presumably halo cells, but the stencil reads +/-4
 * neighbors; confirm no uninitialized boundary value is ever read. */
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Order-4 central-difference Laplacian coefficients (center + 4 rings). */
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled loop nest generated by CLooG: t1 tiles time, t2 (the
 * parallel dimension) tiles z, t3 tiles y, t4 tiles x, t5 is the time
 * step within a tile, and t6/t7/t8 are the z/y/x point loops. Bounds
 * are machine-generated; do not hand-edit. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,3);t1++) {
lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) {
for (t4=max(max(max(max(0,ceild(3*t1-3*t2-254,256)),ceild(3*t1-510,512)),ceild(24*t2-Nz-2035,2048)),ceild(4*t3-Ny-2035,2048));t4<=min(min(min(min(floord(4*Nt+Nx-9,2048),floord(12*t1+Nx+15,2048)),floord(24*t2+Nx+11,2048)),floord(4*t3+Nx-9,2048)),floord(24*t1-24*t2+Nz+Nx+13,2048));t4++) {
for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),512*t4+510);t5++) {
for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) {
lbv=max(2048*t4,4*t5+4);
ubv=min(2048*t4+2047,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
GB_unaryop__abs_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_fp64_fp32
// op(A') function: GB_tran__abs_fp64_fp32
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_fp64_fp32
(
    double *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GB_CAST_OP (p, p) expanded: cast float to double, apply fabs
        double x = (double) Ax [p] ;
        Cx [p] = fabs (x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, casting float entries to double and applying fabs on the
// fly. The transpose loop is the shared template GB_unaryop_transpose.c,
// specialized via the GB_* macros defined at the top of this file.
GrB_Info GB_tran__abs_fp64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
oskar_evaluate_cross_power_omp.c | /*
* Copyright (c) 2014-2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "correlate/oskar_evaluate_cross_power_omp.h"
#include "correlate/private_correlate_functions_inline.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Single precision. */
/* For every source, accumulates the cross-power product of the 2x2
 * complex Jones matrices over all distinct station pairs (SP, SQ > SP)
 * and divides by the number of baselines n*(n-1)/2.
 * jones is laid out station-major: jones[station * num_sources + source].
 * NOTE(review): norm divides by num_stations*(num_stations-1), which is
 * zero for fewer than two stations -- confirm callers guarantee n >= 2. */
void oskar_evaluate_cross_power_omp_f(const int num_sources,
const int num_stations, const float4c* restrict jones,
float4c* restrict out)
{
int i = 0;
float norm;
/* 2 / (n*(n-1)) == 1 / (number of distinct baselines). */
norm = 2.0f / (num_stations * (num_stations - 1));
#pragma omp parallel for private(i)
for (i = 0; i < num_sources; ++i)
{
int SP, SQ;
float4c val, p, q;
/* Calculate cross-power product at the source. */
oskar_clear_complex_matrix_f(&val);
for (SP = 0; SP < num_stations; ++SP)
{
/* Load data for first station. */
OSKAR_LOAD_MATRIX(p, jones, SP * num_sources + i);
/* Cross-correlate. */
for (SQ = SP + 1; SQ < num_stations; ++SQ)
{
/* Load data for second station. */
OSKAR_LOAD_MATRIX(q, jones, SQ * num_sources + i);
/* Multiply-add: val += p * conj(q), expanded over the four
 * complex elements a,b,c,d of the 2x2 matrices. */
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.a, p.a, q.a);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.a, p.b, q.b);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.b, p.a, q.c);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.b, p.b, q.d);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.c, p.c, q.a);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.c, p.d, q.b);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.d, p.c, q.c);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.d, p.d, q.d);
}
}
/* Calculate average by dividing by number of baselines. */
val.a.x *= norm;
val.a.y *= norm;
val.b.x *= norm;
val.b.y *= norm;
val.c.x *= norm;
val.c.y *= norm;
val.d.x *= norm;
val.d.y *= norm;
/* Store result. */
out[i] = val;
}
}
/* Scalar single-precision variant: averages the complex cross-power
 * product over all distinct station pairs (s1, s2 > s1) per source.
 * jones is station-major: jones[station * num_sources + source]. */
void oskar_evaluate_cross_power_scalar_omp_f(
const int num_sources, const int num_stations,
const float2* restrict jones, float2* restrict out)
{
    int i = 0;
    /* 2 / (n*(n-1)) == 1 / (number of distinct baselines). */
    const float norm = 2.0f / (num_stations * (num_stations - 1));
    #pragma omp parallel for private(i)
    for (i = 0; i < num_sources; ++i)
    {
        int s1, s2;
        float2 total, partial, j1, j2;
        total.x = 0.0f;
        total.y = 0.0f;
        for (s1 = 0; s1 < num_stations; ++s1)
        {
            j1 = jones[s1 * num_sources + i];
            partial.x = 0.0f;
            partial.y = 0.0f;
            /* Cross-correlate against every later station. */
            for (s2 = s1 + 1; s2 < num_stations; ++s2)
            {
                j2 = jones[s2 * num_sources + i];
                /* partial += j1 * conj(j2). */
                OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(partial, j1, j2);
            }
            /* Per-station partial sums are folded in one at a time,
             * preserving the original accumulation order (and its
             * numerical-precision intent). */
            total.x += partial.x;
            total.y += partial.y;
        }
        /* Average over the number of baselines. */
        total.x *= norm;
        total.y *= norm;
        out[i] = total;
    }
}
/* Double precision. */
/* Double-precision counterpart of oskar_evaluate_cross_power_omp_f:
 * per-source average cross-power product of 2x2 complex Jones matrices
 * over all distinct station pairs.
 * NOTE(review): same num_stations < 2 division-by-zero hazard in norm. */
void oskar_evaluate_cross_power_omp_d(const int num_sources,
const int num_stations, const double4c* restrict jones,
double4c* restrict out)
{
int i = 0;
double norm;
/* 2 / (n*(n-1)) == 1 / (number of distinct baselines). */
norm = 2.0 / (num_stations * (num_stations - 1));
#pragma omp parallel for private(i)
for (i = 0; i < num_sources; ++i)
{
int SP, SQ;
double4c val, p, q;
/* Calculate cross-power product at the source. */
oskar_clear_complex_matrix_d(&val);
for (SP = 0; SP < num_stations; ++SP)
{
/* Load data for first station. */
OSKAR_LOAD_MATRIX(p, jones, SP * num_sources + i);
/* Cross-correlate. */
for (SQ = SP + 1; SQ < num_stations; ++SQ)
{
/* Load data for second station. */
OSKAR_LOAD_MATRIX(q, jones, SQ * num_sources + i);
/* Multiply-add: val += p * conj(q), expanded over the four
 * complex elements a,b,c,d of the 2x2 matrices. */
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.a, p.a, q.a);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.a, p.b, q.b);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.b, p.a, q.c);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.b, p.b, q.d);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.c, p.c, q.a);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.c, p.d, q.b);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.d, p.c, q.c);
OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(val.d, p.d, q.d);
}
}
/* Calculate average by dividing by number of baselines. */
val.a.x *= norm;
val.a.y *= norm;
val.b.x *= norm;
val.b.y *= norm;
val.c.x *= norm;
val.c.y *= norm;
val.d.x *= norm;
val.d.y *= norm;
/* Store result. */
out[i] = val;
}
}
/* Scalar double-precision variant: averages the complex cross-power
 * product over all distinct station pairs (s1, s2 > s1) per source.
 * jones is station-major: jones[station * num_sources + source]. */
void oskar_evaluate_cross_power_scalar_omp_d(
const int num_sources, const int num_stations,
const double2* restrict jones, double2* restrict out)
{
    int i = 0;
    /* 2 / (n*(n-1)) == 1 / (number of distinct baselines). */
    const double norm = 2.0 / (num_stations * (num_stations - 1));
    #pragma omp parallel for private(i)
    for (i = 0; i < num_sources; ++i)
    {
        int s1, s2;
        double2 total, partial, j1, j2;
        total.x = 0.0;
        total.y = 0.0;
        for (s1 = 0; s1 < num_stations; ++s1)
        {
            j1 = jones[s1 * num_sources + i];
            partial.x = 0.0;
            partial.y = 0.0;
            /* Cross-correlate against every later station. */
            for (s2 = s1 + 1; s2 < num_stations; ++s2)
            {
                j2 = jones[s2 * num_sources + i];
                /* partial += j1 * conj(j2). */
                OSKAR_MULTIPLY_ADD_COMPLEX_CONJUGATE(partial, j1, j2);
            }
            /* Fold in the per-station partial sum, preserving the
             * original accumulation order. */
            total.x += partial.x;
            total.y += partial.y;
        }
        /* Average over the number of baselines. */
        total.x *= norm;
        total.y *= norm;
        out[i] = total;
    }
}
#ifdef __cplusplus
}
#endif
|
GB_unaryop__abs_uint32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_bool
// op(A') function: GB_tran__abs_uint32_bool
// C type: uint32_t
// A type: bool
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint32_bool
(
    uint32_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // GB_CAST_OP (p, p) expanded: abs of a bool is the identity,
        // so this reduces to a cast from bool to uint32_t
        Cx [p] = (uint32_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, casting bool entries to uint32_t (abs of bool is the
// identity). The transpose loop is the shared template
// GB_unaryop_transpose.c, specialized via the GB_* macros above.
GrB_Info GB_tran__abs_uint32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator disabled at compile time; caller falls back to the generic path
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
}
#endif
|
scalability.c | /**
* \file
* \brief libbomp test.
*/
/*
* Copyright (c) 2007, 2008, 2009, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#ifdef POSIX
/* Read the x86 time-stamp counter. RDTSC places the low 32 bits in EAX
 * and the high 32 bits in EDX; recombine them into a 64-bit value. */
static inline uint64_t rdtsc(void)
{
    uint32_t lo, hi;
    __asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
    return ((uint64_t)hi << 32) | lo;
}
#endif
#define N 10000000
/* Scalability micro-benchmark: time a parallel write over a large static
 * array. argv[1] (required) is the OpenMP thread count. Prints the
 * elapsed TSC tick count for the timed parallel loop.
 * NOTE(review): rdtsc() is only defined in this file when POSIX is set;
 * non-POSIX builds presumably get it elsewhere -- confirm. */
int main(int argc, char *argv[])
{
    uint64_t begin, end;
    int i;
    static int a[N];
#ifndef POSIX
    bomp_custom_init();
#endif
    assert(argc == 2);
    omp_set_num_threads(atoi(argv[1]));
    /* Untimed warm-up pass: fault the pages in before measuring. */
    for (i=0;i<N;i++) a[i]= 2*i;
    begin = rdtsc();
    #pragma omp parallel for
    for (i=0;i<N;i++) a[i]= 2*i;
    end = rdtsc();
    /* Fix: uint64_t was printed with %lu, which is undefined behavior on
     * ABIs where uint64_t is unsigned long long (e.g. 32-bit targets);
     * cast explicitly and use %llu so the format always matches. */
    printf("Value of sum is %d, time taken %llu\n", 0,
           (unsigned long long)(end - begin));
    return 0;
}
|
divsufsort.c | /*
* divsufsort.c for libdivsufsort
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
//#include "divsufsort_private.h"
#include "../include/divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif
/*- Private Functions -*/
/* Sorts suffixes of type B*. */
/* Sorts the type-B* suffixes of T[0..n-1].
   On return SA holds the fully sorted order of the m type-B* suffixes and
   bucket_A/bucket_B hold per-character bucket boundaries for the later
   induction passes; the return value is m, the number of type-B* suffixes.
   NOTE(review): the logic below is heavily order-dependent (in-place bucket
   arithmetic, sign-bit tagging of SA entries); code kept byte-identical,
   comments only. */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
               saidx_t *bucket_A, saidx_t *bucket_B,
               saidx_t n) {
  saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
  saidx_t *curbuf;
  saidx_t l;
#endif
  saidx_t i, j, k, t, m, bufsize;
  saint_t c0, c1;
#ifdef _OPENMP
  saint_t d0, d1;
  int tmp;
#endif

  /* Initialize bucket arrays. */
  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }

  /* Count the number of occurrences of the first one or two characters of each
     type A, B and B* suffix. Moreover, store the beginning position of all
     type B* suffixes into the array SA. */
  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
    /* type A suffix. */
    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
    if(0 <= i) {
      /* type B* suffix. */
      ++BUCKET_BSTAR(c0, c1);
      SA[--m] = i;                  /* B* start positions collect at the end of SA */
      /* type B suffix. */
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
        ++BUCKET_B(c0, c1);
      }
    }
  }
  m = n - m;                        /* number of type-B* suffixes */
  /*
  note:
    A type B* suffix is lexicographically smaller than a type B suffix that
    begins with the same first two characters.
  */

  /* Calculate the index of start/end point of each bucket. */
  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
    t = i + BUCKET_A(c0);
    BUCKET_A(c0) = i + j; /* start point */
    i = t + BUCKET_B(c0, c0);
    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
      j += BUCKET_BSTAR(c0, c1);
      BUCKET_BSTAR(c0, c1) = j; /* end point */
      i += BUCKET_B(c0, c1);
    }
  }

  if(0 < m) {
    /* Sort the type B* suffixes by their first two characters. */
    PAb = SA + n - m; ISAb = SA + m;      /* PAb: B* positions; ISAb: rank scratch */
    for(i = m - 2; 0 <= i; --i) {
      t = PAb[i], c0 = T[t], c1 = T[t + 1];
      SA[--BUCKET_BSTAR(c0, c1)] = i;
    }
    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;

    /* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
    /* Threads pull (c0,c1) buckets off a shared cursor guarded by the
       critical section; each thread sorts with its own slice of buf. */
    tmp = omp_get_max_threads();
    buf = SA + m, bufsize = (n - (2 * m)) / tmp;
    c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
    {
      tmp = omp_get_thread_num();
      curbuf = buf + tmp * bufsize;
      k = 0;
      for(;;) {
#pragma omp critical(sssort_lock)
        {
          if(0 < (l = j)) {
            d0 = c0, d1 = c1;
            do {
              k = BUCKET_BSTAR(d0, d1);
              if(--d1 <= d0) {
                d1 = ALPHABET_SIZE - 1;
                if(--d0 < 0) { break; }
              }
            } while(((l - k) <= 1) && (0 < (l = k)));
            c0 = d0, c1 = d1, j = k;
          }
        }
        if(l == 0) { break; }
        sssort(T, PAb, SA + k, SA + l,
               curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
      }
    }
#else
    buf = SA + m, bufsize = n - (2 * m);
    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
        i = BUCKET_BSTAR(c0, c1);
        if(1 < (j - i)) {
          sssort(T, PAb, SA + i, SA + j,
                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
        }
      }
    }
#endif

    /* Compute ranks of type B* substrings. */
    for(i = m - 1; 0 <= i; --i) {
      if(0 <= SA[i]) {
        j = i;
        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
        SA[i + 1] = i - j;
        if(i <= 0) { break; }
      }
      j = i;
      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
      ISAb[SA[i]] = j;
    }

    /* Construct the inverse suffix array of type B* suffixes using trsort. */
    trsort(ISAb, SA, m, 1);

    /* Set the sorted order of tyoe B* suffixes. */
    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
      if(0 <= i) {
        t = i;
        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
      }
    }

    /* Calculate the index of start/end point of each bucket. */
    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
      i = BUCKET_A(c0 + 1) - 1;
      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
        t = i - BUCKET_B(c0, c1);
        BUCKET_B(c0, c1) = i; /* end point */

        /* Move all type B* suffixes to the correct position. */
        for(i = t, j = BUCKET_BSTAR(c0, c1);
            j <= k;
            --i, --k) { SA[i] = SA[k]; }
      }
      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
      BUCKET_B(c0, c0) = i; /* end point */
    }
  }

  return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
/* Induces the full suffix array from the sorted type-B* suffixes:
   first the type-B suffixes (right-to-left scan per bucket), then the
   type-A suffixes (left-to-right scan).  Negative SA entries (~s) mark
   values already induced/processed — the sign bit is used as a tag and
   flipped back as the scan passes over it. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
             saidx_t *bucket_A, saidx_t *bucket_B,
             saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          *j = ~s;                         /* tag this slot as consumed */
          c0 = T[--s];
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            /* switched to a different bucket: save and reload cursor k */
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else {
          assert(((s == 0) && (T[s] == c1)) || (s < 0));
          *j = ~s;
        }
      }
    }
  }

  /* Construct the suffix array by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else {
      assert(s < 0);
      *i = ~s;                             /* untag: restore the real value */
    }
  }
}
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes. */
/* Same induction as construct_SA, but stores BWT characters directly:
   each finished slot receives the character T[s-1] (preceding character)
   rather than the suffix index.  Returns the primary index, i.e. the
   position of the original string's rotation (where s == 0 was found). */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
              saidx_t *bucket_A, saidx_t *bucket_B,
              saidx_t n, saidx_t m) {
  saidx_t *i, *j, *k, *orig;
  saidx_t s;
  saint_t c0, c1, c2;

  if(0 < m) {
    /* Construct the sorted order of type B suffixes by using
       the sorted order of type B* suffixes. */
    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
      /* Scan the suffix array from right to left. */
      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
          i <= j;
          --j) {
        if(0 < (s = *j)) {
          assert(T[s] == c1);
          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
          assert(T[s - 1] <= T[s]);
          c0 = T[--s];
          *j = ~((saidx_t)c0);             /* emit BWT char, tagged negative */
          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
          if(c0 != c2) {
            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
            k = SA + BUCKET_B(c2 = c0, c1);
          }
          assert(k < j);
          *k-- = s;
        } else if(s != 0) {
          *j = ~s;
        #ifndef NDEBUG
        } else {
          assert(T[s] == c1);
        #endif
        }
      }
    }
  }

  /* Construct the BWTed string by using
     the sorted order of type B suffixes. */
  k = SA + BUCKET_A(c2 = T[n - 1]);
  *k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
  /* Scan the suffix array from left to right. */
  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
    if(0 < (s = *i)) {
      assert(T[s - 1] >= T[s]);
      c0 = T[--s];
      *i = c0;                             /* final BWT character for this slot */
      if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
      if(c0 != c2) {
        BUCKET_A(c2) = k - SA;
        k = SA + BUCKET_A(c2 = c0);
      }
      assert(i < k);
      *k++ = s;
    } else if(s != 0) {
      *i = ~s;                             /* untag previously-emitted char */
    } else {
      orig = i;                            /* s == 0: this is the primary index */
    }
  }

  return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
/* Computes the suffix array of T[0..n-1] into SA[0..n-1].
   Returns 0 on success, -1 on invalid arguments (NULL pointers or n < 0),
   -2 if the temporary bucket arrays could not be allocated.
   n <= 2 is handled directly without allocation. */
saint_t
divsufsort(const sauchar_t *T, saidx_t *SA, saidx_t n) {
  saidx_t *bucket_A, *bucket_B;
  saidx_t m;
  saint_t err = 0;

  /* Check arguments. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  else if(n == 0) { return 0; }
  else if(n == 1) { SA[0] = 0; return 0; }
  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }

  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Suffixsort. */
  if((bucket_A != NULL) && (bucket_B != NULL)) {
    /* Sort the type-B* suffixes, then induce the rest from them. */
    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n);
    construct_SA(T, SA, bucket_A, bucket_B, n, m);
  } else {
    err = -2;
  }

  /* free(NULL) is a no-op, so partial allocation is safe here. */
  free(bucket_B);
  free(bucket_A);

  return err;
}
/* Computes the Burrows-Wheeler transform of T[0..n-1] into U[0..n-1].
   A, if non-NULL, supplies caller-provided working space of n+1 saidx_t;
   otherwise a temporary array is allocated and freed here.
   Returns the primary index on success (n for n <= 1), -1 on invalid
   arguments, -2 on allocation failure. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
  saidx_t *B;
  saidx_t *bucket_A, *bucket_B;
  saidx_t m, pidx, i;

  /* Check arguments. */
  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }

  if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
  bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
  bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));

  /* Burrows-Wheeler Transform. */
  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
    m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
    pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);

    /* Copy to output string: U[0] is the last character of T, then the
       BWT characters from B, skipping over the primary-index slot. */
    U[0] = T[n - 1];
    for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
    for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
    pidx += 1;
  } else {
    pidx = -2;
  }

  free(bucket_B);
  free(bucket_A);
  if(A == NULL) { free(B); }   /* free B only if we allocated it */

  return pidx;
}
/* Returns the library version identifier as a static string. */
const char *
divsufsort_version(void) {
  static const char version_string[] = "2.0.1-14-g5f60d6f";
  return version_string;
}
|
GB_unaryop__minv_int32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int32_int16
// op(A') function: GB_tran__minv_int32_int16
// C type: int32_t
// A type: int16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary operator entrywise: Cx [p] = minv ((int32_t) Ax [p]) for
// all p in [0, anz), in parallel over nthreads threads.  GB_CAST_OP expands
// to the cast int32_t z = (int32_t) aij followed by GB_IMINV_SIGNED (z, 32)
// (see the macro definitions above).  Returns GrB_NO_VALUE when this
// operator/type combination is disabled at compile time, else GrB_SUCCESS.
GrB_Info GB_unop__minv_int32_int16
(
    int32_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((int32_t) A'): transpose A, typecast, and apply the unary
// operator.  The actual loop lives in the shared template
// GB_unaryop_transpose.c, instantiated here via the GB_* macros defined
// above; phase 2 fills C using the Rowcounts computed in phase 1.
// Returns GrB_NO_VALUE when disabled at compile time, else GrB_SUCCESS.
GrB_Info GB_tran__minv_int32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
proc.c | int g1 = 10, g2 = 20;
/* Sums its arguments into a (result immediately discarded), then replaces a
   with bar()'s result and resets global g1.
   NOTE(review): bar() is called before any declaration is visible — an
   implicit function declaration, invalid since C99; a forward prototype
   should be added.  foo and bar are mutually recursive with no base case,
   so calling either never terminates — presumably this file is a fixture
   for a call-graph/recursion analysis; confirm before "fixing". */
int foo(int a, int b, int c) {
    a = a + b + c;
    a = bar();
    g1 = 10;
    return a;
}
/* Calls foo (which calls bar) and then bar itself — unbounded mutual and
   self recursion; a real call never terminates.  Kept as-is: this looks
   like a deliberate recursion-detection test case (note norecurse below). */
int bar() {
    int z = 10;
    z = foo(z, z, z);
    z = bar();
    return z;
}
/* Copies the current value of g2 into g1 (post-incrementing g2) and
   returns the stored value. */
int norecurse() {
    int previous = g2;
    g2 = g2 + 1;
    g1 = previous;
    return g1;
}
/* Driver exercising the helpers above; the constant-false branch suggests a
   dead-code/recursion-analysis fixture.
   NOTE(review): main falls off the end without a return — legal since C99
   (implicitly returns 0). */
int main() {
    int x;
    x = 0;
    int y, z;
    y = z = 0;               /* y and z are only read inside the dead branch */
    x = x + 4;
    #pragma omp flush
    x = norecurse();
    if (0) {                 /* dead code: condition is constant false */
        x = foo(x, y, g1);   /* would never terminate if reached (foo/bar recurse forever) */
    } else {
        x = norecurse();
    }
    x = g1;
}
|
multMatrixOMP.h |
/* Multiply two integer matrices in parallel with OpenMP: C = A * B.
 * A, B and C are row-major with row stride nx; the loops iterate ny rows
 * by nx columns with an ny-long inner product (square usage: nx == ny).
 * Rows are distributed across threads; each C element is written by
 * exactly one thread, so no synchronization is needed.
 * Fix: accumulate in int, not float — a float mantissa (24 bits) silently
 * loses precision for integer products beyond 2^24. */
void multMatrixOMP(int *A, int *B, int *C, const int nx, const int ny){
    int *ia = A;
    int *ib = B;
    int *ic = C;
    int i, j, k;
    #pragma omp parallel for private(i,j,k) shared(ia, ib, ic)
    for (i = 0; i < ny; i++)
    {
        for (j = 0; j < nx; j++)
        {
            int sum = 0;  /* integer accumulator: exact for int inputs */
            for (k = 0; k < ny; k++)
                sum += ia[i * nx + k] * ib[k * nx + j];
            ic[i * nx + j] = sum;
        }
    }
}
|
helpers.h | // MIT License
//
// Copyright (c) 2019 Oleksandr Tkachenko
// Cryptography and Privacy Engineering Group (ENCRYPTO)
// TU Darmstadt, Germany
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#pragma once
#include <flatbuffers/flatbuffers.h>
#include <fmt/format.h>
#include <random>
#include "condition.h"
#include "primitives/random/aes128_ctr_rng.h"
#include "typedefs.h"
namespace encrypto::motion {
/// \brief Returns a vector of \p length random unsigned integral values.
/// \tparam UnsignedIntegralType element type; must be an unsigned integral type.
/// \param length number of elements to generate.
/// \return \p length elements whose storage is filled from the thread-local
/// AES128-CTR RNG (deterministic per RNG seed, independent per thread).
template <typename UnsignedIntegralType,
          typename = std::enable_if_t<std::is_unsigned_v<UnsignedIntegralType>>>
std::vector<UnsignedIntegralType> RandomVector(std::size_t length) {
  auto& rng = Aes128CtrRng::GetThreadInstance();  // thread-local RNG instance
  const auto byte_size = sizeof(UnsignedIntegralType) * length;
  std::vector<UnsignedIntegralType> vec(length);
  // Fill the element storage directly with random bytes.
  rng.RandomBytes(reinterpret_cast<std::byte*>(vec.data()), byte_size);
  return vec;
}
/// \brief Reinterprets a vector of unsigned integral values as its raw bytes.
/// \tparam UnsignedIntegralType element type; must be an unsigned integral type.
/// \param values
/// \return the bytes of \p values in memory order (host endianness).
template <typename UnsignedIntegralType,
          typename = std::enable_if_t<std::is_unsigned_v<UnsignedIntegralType>>>
inline std::vector<std::uint8_t> ToByteVector(const std::vector<UnsignedIntegralType>& values) {
  const auto* first = reinterpret_cast<const std::uint8_t*>(values.data());
  const auto* last = first + sizeof(UnsignedIntegralType) * values.size();
  return std::vector<std::uint8_t>(first, last);
}
/// \brief Converts a vector of uint8_t to a vector of unsigned integral values.
/// \tparam UnsignedIntegralType
/// \param buffer
/// \return buffer.size()/sizeof(UnsignedIntegralType) elements holding the
/// bytes of \p buffer in memory order (host endianness).
/// \pre buffer.size() is a multiple of sizeof(UnsignedIntegralType).
template <typename UnsignedIntegralType,
          typename = std::enable_if_t<std::is_unsigned_v<UnsignedIntegralType>>>
inline std::vector<UnsignedIntegralType> FromByteVector(const std::vector<std::uint8_t>& buffer) {
  assert(buffer.size() % sizeof(UnsignedIntegralType) ==
         0);  // buffer length is multiple of the element size
  // BUG FIX: the element count is size/sizeof(T); the previous code
  // multiplied instead, over-allocating and returning a vector padded with
  // trailing zero elements (compare the correct flatbuffers overload below).
  std::vector<UnsignedIntegralType> result(buffer.size() / sizeof(UnsignedIntegralType));
  std::copy(buffer.data(), buffer.data() + buffer.size(),
            reinterpret_cast<std::uint8_t*>(result.data()));
  return result;
}
/// \brief Converts a flatbuffer vector of uint8_t to a vector of unsigned integral values
/// \tparam UnsignedIntegralType
/// \param buffer
/// \return buffer.size()/sizeof(UnsignedIntegralType) elements holding the
/// bytes of \p buffer in memory order (host endianness).
/// \pre buffer.size() is a multiple of sizeof(UnsignedIntegralType).
template <typename UnsignedIntegralType,
          typename = std::enable_if_t<std::is_unsigned_v<UnsignedIntegralType>>>
inline std::vector<UnsignedIntegralType> FromByteVector(
    const flatbuffers::Vector<std::uint8_t>& buffer) {
  assert(buffer.size() % sizeof(UnsignedIntegralType) ==
         0);  // buffer length is multiple of the element size
  std::vector<UnsignedIntegralType> result(buffer.size() / sizeof(UnsignedIntegralType));
  std::copy(buffer.data(), buffer.data() + buffer.size(),
            reinterpret_cast<std::uint8_t*>(result.data()));
  return result;
}
/// \brief Element-wise sum of two equally sized vectors.
/// \tparam T element type; must provide operator+=.
/// \param a
/// \param b
/// \return a vector whose ith entry is a[i] + b[i].
/// \pre \p a and \p b must be of equal size.
template <typename T>
inline std::vector<T> AddVectors(const std::vector<T>& a, const std::vector<T>& b) {
  assert(a.size() == b.size());
  if (a.empty()) {
    return {};
  }
  std::vector<T> sum(a);
#pragma omp simd
  for (auto index = 0ull; index < sum.size(); ++index) {
    sum.at(index) += b.at(index);  // TODO: implement using AVX2 and AVX512
  }
  return sum;
}
/// \brief Element-wise difference of two equally sized vectors.
/// \tparam T element type; must provide operator-=.
/// \param a
/// \param b
/// \return a vector whose ith entry is a[i] - b[i].
/// \pre \p a and \p b must be of equal size.
template <typename T>
inline std::vector<T> SubVectors(const std::vector<T>& a, const std::vector<T>& b) {
  assert(a.size() == b.size());
  if (a.empty()) {
    return {};
  }
  std::vector<T> difference(a);
  for (auto index = 0ull; index < difference.size(); ++index) {
    difference.at(index) -= b.at(index);  // TODO: implement using AVX2 and AVX512
  }
  return difference;
}
/// \brief Element-wise product of two equally sized vectors.
/// \tparam T element type; must provide operator*=.
/// \param a
/// \param b
/// \return a vector whose ith entry is a[i] * b[i].
/// \pre \p a and \p b must be of equal size.
template <typename T>
inline std::vector<T> MultiplyVectors(std::vector<T> a, std::vector<T> b) {
  assert(a.size() == b.size());
  if (a.size() == 0) {
    return {};
  }  // if empty input vector
  // a is already a by-value copy; multiply into it directly instead of
  // copying it a second time as the previous `result = a` did.
  for (auto j = 0ull; j < a.size(); ++j) {
    a.at(j) *= b.at(j);  // TODO: implement using AVX2 and AVX512
  }
  return a;
}
/// \brief Performs the AddVectors operation on an arbitrary number of vectors.
/// \tparam T element type; must provide operator+=.
/// \param vectors A vector of vectors.
/// \return a vector whose ith entry is the sum of the ith entries of all rows.
/// \pre All vectors in \p vectors must be of equal size.
template <typename T>
inline std::vector<T> AddVectors(std::vector<std::vector<T>>& vectors) {
  if (vectors.empty()) {
    return {};
  }
  std::vector<T> total = vectors.at(0);
  for (auto row = 1ull; row < vectors.size(); ++row) {
    auto& current = vectors.at(row);
    assert(current.size() == total.size());  // expect equally sized rows
    for (auto col = 0ull; col < total.size(); ++col) {
      total.at(col) += current.at(col);  // TODO: implement using AVX2 and AVX512
    }
  }
  return total;
}
/// \brief Performs the AddVectors operation on an arbitrary number of vectors.
/// \tparam T type of the elements in the vectors. T must provide the += operator.
/// \param vectors A vector of vectors (rvalue).
/// \return A vector containing at position i the sum of each element
/// at position i of the input vectors.
/// \pre All vectors in \p vectors must be of equal size.
template <typename T>
inline std::vector<T> AddVectors(std::vector<std::vector<T>>&& vectors) {
  // The named rvalue parameter binds as an lvalue, delegating to the
  // lvalue overload above.
  return AddVectors(vectors);
}
// XXX two distinct vectors do not overlop, so I don't see the use for the restrict functions.
/// \brief Element-wise sum of two non-overlapping, equally sized vectors.
/// \tparam T element type; must provide the binary + operator.
/// \param a
/// \param b
/// \return a vector whose ith entry is a[i] + b[i].
/// \pre \p a and \p b must be of equal size and must not overlap.
template <typename T>
inline std::vector<T> RestrictAddVectors(const std::vector<T>& a, const std::vector<T>& b) {
  assert(a.size() == b.size());
  if (a.empty()) {
    return {};
  }
  std::vector<T> result(a.size());
  // __restrict__ promises the compiler the three ranges are disjoint.
  const T* __restrict__ lhs{a.data()};
  const T* __restrict__ rhs{b.data()};
  T* __restrict__ out{result.data()};
  for (std::size_t i = 0; i != a.size(); ++i) {
    out[i] = lhs[i] + rhs[i];
  }
  return result;
}
/// \brief Element-wise difference of two non-overlapping, equally sized vectors.
/// \tparam T element type; must provide the binary - operator.
/// \param a
/// \param b
/// \return a vector whose ith entry is a[i] - b[i].
/// \pre \p a and \p b must be of equal size and must not overlap.
template <typename T>
inline std::vector<T> RestrictSubVectors(const std::vector<T>& a, const std::vector<T>& b) {
  assert(a.size() == b.size());
  if (a.empty()) {
    return {};
  }
  std::vector<T> result(a.size());
  // __restrict__ promises the compiler the three ranges are disjoint.
  const T* __restrict__ lhs{a.data()};
  const T* __restrict__ rhs{b.data()};
  T* __restrict__ out{result.data()};
  for (std::size_t i = 0; i != a.size(); ++i) {
    out[i] = lhs[i] - rhs[i];
  }
  return result;
}
/// \brief Element-wise product of two non-overlapping, equally sized vectors.
/// \tparam T element type; must provide the binary * operator.
/// \param a
/// \param b
/// \return a vector whose ith entry is a[i] * b[i].
/// \pre \p a and \p b must be of equal size and must not overlap.
template <typename T>
inline std::vector<T> RestrictMulVectors(const std::vector<T>& a, const std::vector<T>& b) {
  assert(a.size() == b.size());
  if (a.empty()) {
    return {};
  }
  std::vector<T> result(a.size());
  // __restrict__ promises the compiler the three ranges are disjoint.
  const T* __restrict__ lhs{a.data()};
  const T* __restrict__ rhs{b.data()};
  T* __restrict__ out{result.data()};
  for (std::size_t i = 0; i != a.size(); ++i) {
    out[i] = lhs[i] * rhs[i];
  }
  return result;
}
/// \brief Returns the sum of each element in \p values.
/// \tparam T type of the elements in the vectors. T must provide the += operator
/// and be constructible from 0 (also required by the OpenMP reduction).
/// \param values
/// \return 0 for an empty input, the single element for size 1, otherwise the
/// (possibly parallel) sum of all elements.
template <typename T>
inline T SumReduction(const std::vector<T>& values) {
  if (values.size() == 0) {
    return 0;
  } else if (values.size() == 1) {
    return values.at(0);
  } else {
    T sum = 0;
// Each thread accumulates a private partial sum; OpenMP combines them.
#pragma omp parallel for reduction(+ : sum) default(none) shared(values)
    for (auto i = 0ull; i < values.size(); ++i) {
      sum += values.at(i);
    }
    return sum;
  }
}
/// \brief Left-fold subtraction: values[0] - values[1] - ... - values[n-1].
/// \tparam T element type; must provide operator-= and be constructible from 0.
/// \param values
/// \return 0 for an empty input, otherwise the folded difference.
template <typename T>
inline T SubReduction(const std::vector<T>& values) {
  if (values.empty()) {
    return 0;
  }
  T difference = values.at(0);
  for (auto index = 1ull; index < values.size(); ++index) {
    difference -= values.at(index);
  }
  return difference;
}
/// \brief Product of all elements: values[0] * values[1] * ... * values[n-1].
/// \tparam T element type; must provide operator*= and be constructible from 0.
/// \param values
/// \return 0 for an empty input (note: not the multiplicative identity),
/// otherwise the product of all elements.
template <typename T>
inline T MulReduction(const std::vector<T>& values) {
  if (values.empty()) {
    return 0;
  }
  T product = values.at(0);
  for (auto index = 1ull; index < values.size(); ++index) {
    product *= values.at(index);
  }
  return product;
}
/// \brief Returns the column-wise sum of a matrix (vector of equally sized rows).
/// \tparam T type of the elements in the vectors. T must provide the += operator.
/// \param values A vector of vectors.
/// \return sum[i] = values[0][i] + values[1][i] + ... + values[m][i];
/// empty input yields an empty vector.
/// \pre All vectors in \p values must be of equal size.
template <typename T>
inline std::vector<T> RowSumReduction(const std::vector<std::vector<T>>& values) {
  if (values.size() == 0) {
    return {};
  }
  std::vector<T> sum(values.at(0).size());  // value-initialized accumulators
  for (auto i = 1ull; i < values.size(); ++i) {
    assert(values.at(0).size() == values.at(i).size());
  }
  for (auto i = 0ull; i < sum.size(); ++i) {
    for (auto j = 0ull; j < values.size(); ++j) {
      sum.at(i) += values.at(j).at(i);
    }
  }
  // Fix: plain return enables NRVO; `return std::move(sum)` pessimized the
  // copy elision (clang's -Wpessimizing-move flags it).
  return sum;
}
/// \brief Returns the column-wise left-fold difference of a matrix.
/// \tparam T type of the elements in the vectors. T must provide the -= operator.
/// \param values A vector of vectors.
/// \return diff[i] = values[0][i] - values[1][i] - ... - values[m][i];
/// empty input yields an empty vector.
/// \pre All vectors in \p values must be of equal size.
template <typename T>
inline std::vector<T> RowSubReduction(const std::vector<std::vector<T>>& values) {
  if (values.size() == 0) {
    return {};
  }
  std::vector<T> result = values.at(0);  // first row seeds the fold
  for (auto i = 1ull; i < values.size(); ++i) {
    assert(values.at(0).size() == values.at(i).size());
  }
  for (auto i = 0ull; i < result.size(); ++i) {
    for (auto j = 1ull; j < values.size(); ++j) {
      result.at(i) -= values.at(j).at(i);
    }
  }
  // Fix: plain return enables NRVO; `return std::move(result)` pessimized
  // the copy elision.
  return result;
}
/// \brief Returns the column-wise product of a matrix.
/// \tparam T type of the elements in the vectors. T must provide the *= operator.
/// \param values A vector of vectors.
/// \return prod[i] = values[0][i] * values[1][i] * ... * values[m][i];
/// empty input yields an empty vector.
/// \pre All vectors in \p values must be of equal size.
template <typename T>
inline std::vector<T> RowMulReduction(const std::vector<std::vector<T>>& values) {
  if (values.size() == 0) {
    return {};
  }
  std::vector<T> product(values.at(0).size(), 1);  // multiplicative identity
  for (auto i = 1ull; i < values.size(); ++i) {
    assert(values.at(0).size() == values.at(i).size());
  }
  for (auto i = 0ull; i < product.size(); ++i) {
    for (auto j = 0ull; j < values.size(); ++j) {
      product.at(i) *= values.at(j).at(i);
    }
  }
  // Fix: plain return enables NRVO; `return std::move(product)` pessimized
  // the copy elision.
  return product;
}
/// \brief Checks whether an unsigned integral value is a power of two.
/// \tparam UnsignedIntegralType must be an unsigned integral type.
/// \param x
/// \return true iff x has exactly one bit set (so 0 is not a power of two).
template <typename UnsignedIntegralType,
          typename = std::enable_if_t<std::is_unsigned_v<UnsignedIntegralType>>>
bool IsPowerOfTwo(UnsignedIntegralType x) {
  if (x == 0) {
    return false;
  }
  // A power of two has a single set bit, so x & (x - 1) clears it to 0.
  return (x & (x - 1)) == 0;
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \param values
/// \param n Number of bytes.
/// \return space-separated "0x.." tokens without a trailing space; empty
/// string when n == 0.
inline std::string Hex(const std::uint8_t* values, std::size_t n) {
  std::string buffer;
  for (auto i = 0ull; i < n; ++i) {
    buffer.append(fmt::format("{0:#x} ", values[i]));
  }
  // Fix: guard n == 0 — erasing buffer.end() - 1 of an empty string is
  // undefined behavior.
  if (!buffer.empty()) {
    buffer.erase(buffer.end() - 1);  // remove the last whitespace
  }
  return buffer;
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \param values
/// \param n Number of bytes.
/// \return space-separated "0x.." tokens (see the uint8_t overload above).
inline std::string Hex(const std::byte* values, std::size_t n) {
  // std::byte and std::uint8_t have identical representation; forward.
  return Hex(reinterpret_cast<const std::uint8_t*>(values), n);
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \tparam N number of bytes in the array.
/// \param values
template <std::size_t N>
inline std::string Hex(const std::array<std::byte, N>& values) {
  // Forward to the pointer+length overload.
  return Hex(reinterpret_cast<const std::uint8_t*>(values.data()), values.size());
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \tparam N number of bytes in the array.
/// \param values
template <std::size_t N>
inline std::string Hex(const std::array<std::uint8_t, N>& values) {
  // Forward to the pointer+length overload.
  return Hex(values.data(), values.size());
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \param values
inline std::string Hex(const std::vector<std::uint8_t>& values) {
  // Forward to the pointer+length overload.
  return Hex(values.data(), values.size());
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \param values
inline std::string Hex(const std::vector<std::byte>& values) {
  // values.data() is std::byte*, so this selects the std::byte overload.
  return Hex(values.data(), values.size());
}
/// \brief Returns a hexadecimal string representation of the bytes stored in \p values
/// \param values (rvalue; binds as an lvalue and forwards to the overload above)
inline std::string Hex(const std::vector<std::uint8_t>&& values) { return Hex(values); }
/// \brief Returns a space-separated string representation of \p values.
/// \tparam T element type; a matching to_string must be found via ADL or std.
/// \param values
/// \return every element followed by a single space (trailing space included).
template <typename T>
inline std::string to_string(std::vector<T> values) {
  using std::to_string;  // let ADL pick a user to_string, fall back to std
  std::string result;
  for (auto& v : values) {
    result += to_string(v);
    result += ' ';
  }
  return result;
}
/// XXX the std library implements operators for vector comparisions.
/// \brief Element-wise equality of two vectors (sizes must match too).
/// \tparam T element type; must provide operator!=.
template <typename T>
inline bool Vectors(const std::vector<T>& a, const std::vector<T>& b) {
  if (a.size() != b.size()) {
    return false;
  }
  for (std::size_t i = 0; i != a.size(); ++i) {
    if (a.at(i) != b.at(i)) {
      return false;
    }
  }
  return true;
}
/// \brief Checks whether all inner vectors have the same size.
/// \param values A vector of vectors.
/// \return true for zero or one rows, otherwise true iff every row matches
/// the first row's size.
template <typename T>
inline bool Dimensions(const std::vector<std::vector<T>>& values) {
  if (values.size() <= 1) {
    return true;
  }
  const auto expected = values.at(0).size();
  for (auto row = 1ull; row < values.size(); ++row) {
    if (values.at(row).size() != expected) {
      return false;
    }
  }
  return true;
}
/// \brief Divides two size_t and returns the ceiled quotient.
/// \param dividend
/// \param divisor
/// \pre Divisor is not 0.
std::size_t DivideAndCeil(std::size_t dividend, std::size_t divisor);
/// \brief Returns the number of bytes necessary to store \p bits bits.
/// \param bits
/// \return ceil(bits / 8). Computed as quotient + carry so the result is
/// correct even for bits > SIZE_MAX - 7, where the previous `(bits + 7) / 8`
/// wrapped around.
constexpr std::size_t BitsToBytes(const std::size_t bits) {
  return bits / 8 + ((bits % 8) != 0 ? 1 : 0);
}
} // namespace encrypto::motion
|
GB_unop__identity_int64_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_uint8)
// op(A') function: GB (_unop_tran__identity_int64_uint8)
// C type: int64_t
// A type: uint8_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary operator with typecast: Cx [p] = (int64_t) Ax [p]
// for all anz entries. Cx and Ax may be aliased (the cast is done through a
// local temporary, so in-place operation is safe). This file is
// auto-generated; the code is left byte-identical and only annotated.
GrB_Info GB (_unop_apply__identity_int64_uint8)
(
int64_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time;
// the caller falls back to the generic (non-specialized) kernel
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/sparse case: every one of the anz entries is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
// only positions with Ab [p] != 0 hold a valid entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A, casting uint8_t entries to int64_t.
// The actual transpose algorithm is shared across all type/op combinations
// and is textually included from GB_unop_transpose.c; it uses the GB_CAST_OP
// macro defined above to perform the per-entry cast.
GrB_Info GB (_unop_tran__identity_int64_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// disabled at compile time; caller uses the generic kernel instead
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
private-clause.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
/*
 * OpenMP teaching example: demonstrates why a shared accumulator needs a
 * private or reduction clause. Every thread resets and updates the SHARED
 * variable "suma" without synchronization, so the printed results are
 * nondeterministic. The data race is deliberate (it is the point of the
 * demo) and is kept; only the invalid implicit-int declaration of main()
 * (an error since C99) is fixed.
 */
int main(void)
{
    int i, n = 7;
    int a[n], suma = 50;

    for (i = 0; i < n; i++)
        a[i] = i;

#pragma omp parallel
    {
        suma = 0; /* every thread resets the shared accumulator: data race */
#pragma omp for
        for (i = 0; i < n; i++)
        {
            suma = suma + a[i]; /* unsynchronized read-modify-write: data race */
            printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
        }
        printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
    }
    printf("\n");
    printf("suma=%d \n", suma);
    return 0;
}
|
DenseMatrix.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/functors/AddAssign.h>
#include <blaze/math/functors/Assign.h>
#include <blaze/math/functors/MultAssign.h>
#include <blaze/math/functors/SchurAssign.h>
#include <blaze/math/functors/SubAssign.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side dense matrix
, bool SO2 // Storage order of the right-hand side dense matrix
, typename OP > // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
using ET1 = ElementType_t<MT1>;
using ET2 = ElementType_t<MT2>;
// SIMD tiles are used only when both matrix types are vectorizable and
// their element types can be combined in one SIMD operation
constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> );
constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<MT1> >::size );
const bool lhsAligned( (~lhs).isAligned() );
const bool rhsAligned( (~rhs).isAligned() );
// partition the matrix into a (threadmap.first x threadmap.second) grid of
// tiles, one tile per iteration of the omp-for loop below
const int threads( omp_get_num_threads() );
const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
// rows per tile: ceil( rows / threadmap.first ), rounded UP to a multiple
// of SIMDSIZE (assumes SIMDSIZE is a power of two; rest1 is share % SIMDSIZE)
// so that tile boundaries stay SIMD-aligned when vectorization is enabled
const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );
// columns per tile: same computation along the column dimension
const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );
// NOTE: this is an orphaned omp-for; the enclosing "omp parallel" region is
// opened by the caller (see the smpAssign() functions below)
#pragma omp for schedule(dynamic,1) nowait
for( int i=0; i<threads; ++i )
{
// map the linear tile index i to its (row, column) origin in the grid
const size_t row ( ( i / threadmap.second ) * rowsPerThread );
const size_t column( ( i % threadmap.second ) * colsPerThread );
// tiles pushed past the matrix bounds by the SIMD round-up are empty
if( row >= (~rhs).rows() || column >= (~rhs).columns() )
continue;
// clamp the tile extents at the matrix bounds
const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
// dispatch on the runtime alignment of both operands so that aligned
// submatrix views (enabling aligned SIMD loads/stores) are used when possible
if( simdEnabled && lhsAligned && rhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
else if( simdEnabled && lhsAligned ) {
auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
else if( simdEnabled && rhsAligned ) {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
else {
auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
op( target, source );
}
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side sparse matrix
        , bool SO2 // Storage order of the right-hand side sparse matrix
        , typename OP > // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // Partition the matrix into a (threadmap.first x threadmap.second) grid of
   // tiles of ceil(rows/first) x ceil(columns/second) elements; one tile per
   // iteration of the (orphaned) omp-for loop below. No SIMD padding is
   // required since the right-hand side is sparse.
   const size_t threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );
   const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );
   const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

#pragma omp for schedule(dynamic,1) nowait
   for( size_t i=0; i<threads; ++i )
   {
      // map the linear tile index i to its (row, column) origin
      const size_t row ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;
      // Clamp the tile extents at the matrix bounds. The right-hand side
      // dimensions are used consistently here (the original mixed in
      // (~lhs).rows()/columns(); both are asserted equal by the callers,
      // but the dense overload above clamps against rhs as well).
      const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );
      auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side matrix
        , bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Serial fallback: at least one operand is not SMP-assignable, so the
   // assignment is forwarded directly to the serial kernel.
   MT1&       left ( ~lhs );
   const MT2& right( ~rhs );

   BLAZE_INTERNAL_ASSERT( left.rows()    == right.rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( left.columns() == right.columns(), "Invalid number of columns" );

   assign( left, right );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// element types themselves must not be SMP-assignable (no nested parallelism)
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// fall back to the serial kernel if parallelism is unavailable/disabled
// or the right-hand side expression cannot be evaluated in parallel
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
assign( ~lhs, ~rhs );
}
else {
// open the parallel region here; openmpAssign() contains the matching
// orphaned omp-for work-sharing loop
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, Assign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side matrix
        , bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Serial fallback: at least one operand is not SMP-assignable, so the
   // addition assignment is forwarded directly to the serial kernel.
   MT1&       left ( ~lhs );
   const MT2& right( ~rhs );

   BLAZE_INTERNAL_ASSERT( left.rows()    == right.rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( left.columns() == right.columns(), "Invalid number of columns" );

   addAssign( left, right );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// element types themselves must not be SMP-assignable (no nested parallelism)
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// fall back to the serial kernel if parallelism is unavailable/disabled
// or the right-hand side expression cannot be evaluated in parallel
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
addAssign( ~lhs, ~rhs );
}
else {
// open the parallel region; openmpAssign() contains the orphaned omp-for
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, AddAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtracction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment to a dense matrix.
// Due to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side matrix
        , bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Serial fallback: at least one operand is not SMP-assignable, so the
   // subtraction assignment is forwarded directly to the serial kernel.
   MT1&       left ( ~lhs );
   const MT2& right( ~rhs );

   BLAZE_INTERNAL_ASSERT( left.rows()    == right.rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( left.columns() == right.columns(), "Invalid number of columns" );

   subAssign( left, right );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtracction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a matrix to a
// dense matrix. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// element types themselves must not be SMP-assignable (no nested parallelism)
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// fall back to the serial kernel if parallelism is unavailable/disabled
// or the right-hand side expression cannot be evaluated in parallel
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
subAssign( ~lhs, ~rhs );
}
else {
// open the parallel region; openmpAssign() contains the orphaned omp-for
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, SubAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the default OpenMP-based SMP Schur product assignment to a dense
// matrix. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side dense matrix
        , typename MT2 // Type of the right-hand side matrix
        , bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && ( !IsSMPAssignable_v<MT1> || !IsSMPAssignable_v<MT2> ) >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Serial fallback: at least one operand is not SMP-assignable, so the
   // Schur product assignment is forwarded directly to the serial kernel.
   MT1&       left ( ~lhs );
   const MT2& right( ~rhs );

   BLAZE_INTERNAL_ASSERT( left.rows()    == right.rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( left.columns() == right.columns(), "Invalid number of columns" );

   schurAssign( left, right );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
, bool SO1 // Storage order of the left-hand side dense matrix
, typename MT2 // Type of the right-hand side matrix
, bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> && IsSMPAssignable_v<MT1> && IsSMPAssignable_v<MT2> >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
BLAZE_FUNCTION_TRACE;
// element types themselves must not be SMP-assignable (no nested parallelism)
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT1> );
BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<MT2> );
BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );
BLAZE_PARALLEL_SECTION
{
// fall back to the serial kernel if parallelism is unavailable/disabled
// or the right-hand side expression cannot be evaluated in parallel
if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
schurAssign( ~lhs, ~rhs );
}
else {
// open the parallel region; openmpAssign() contains the orphaned omp-for
#pragma omp parallel shared( lhs, rhs )
openmpAssign( ~lhs, ~rhs, SchurAssign() );
}
}
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1 // Type of the left-hand side dense matrix
        , bool SO1 // Storage order of the left-hand side matrix
        , typename MT2 // Type of the right-hand side matrix
        , bool SO2 > // Storage order of the right-hand side matrix
inline EnableIf_t< IsDenseMatrix_v<MT1> >
smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Multiplication assignment is not tiled by this backend; it is always
   // delegated to the serial multiplication kernel.
   MT1&       left ( ~lhs );
   const MT2& right( ~rhs );

   BLAZE_INTERNAL_ASSERT( left.rows()    == right.rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( left.columns() == right.columns(), "Invalid number of columns" );

   multAssign( left, right );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
jacobi_float.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <math.h>
#define REAL float
/* Returns the current wall-clock time in milliseconds.
 * Replaces the obsolescent ftime()/<sys/timeb.h> interface (marked legacy
 * and removed from POSIX.1-2008) with the standard C11 timespec_get(),
 * which also offers sub-millisecond resolution. */
static double read_timer_ms() {
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    return (double) ts.tv_sec * 1000.0 + (double) ts.tv_nsec / 1.0e6;
}
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This c version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define DEFAULT_DIMSIZE 256
/* Prints the n-by-m matrix A (stored row-major in a flat array) to stdout,
 * one matrix row per output line, preceded by the given title. Each element
 * is labeled "name[i][j]:value". */
void print_array(char *title, char *name, REAL *A, int n, int m) {
    int row, col;

    printf("%s:\n", title);
    for (row = 0; row < n; row++) {
        for (col = 0; col < m; col++) {
            printf("%s[%d][%d]:%f ", name, row, col, A[row * m + col]);
        }
        printf("\n");
    }
    printf("\n");
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Initialize the problem: compute grid spacings dx,dy over [-1,1]x[-1,1],
 * zero the solution u, and fill the right-hand side f corresponding to the
 * exact solution u(x,y) = (1-x^2)*(1-y^2).
 * u_p and f_p point to n*m REALs, viewed here as n-by-m row-major arrays. */
void initialize(int n, int m, REAL alpha, REAL *dx, REAL *dy, REAL *u_p, REAL *f_p) {
int i;
int j;
int xx;
int yy;
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
//double PI=3.1415926;
*dx = (2.0 / (n - 1));
*dy = (2.0 / (m - 1));
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(xx,yy,j,i)
for (i = 0; i < n; i++)
for (j = 0; j < m; j++) {
/* NOTE(review): xx,yy are ints, so the coordinate -1+dx*(i-1) is
 * truncated toward zero here, while error_check computes the same
 * expressions in REAL. This matches the original KAI/Liao benchmark
 * source; confirm it is intentional before changing it. */
xx = ((int) (-1.0 + (*dx * (i - 1))));
yy = ((int) (-1.0 + (*dy * (j - 1))));
u[i][j] = 0.0;
f[i][j] = (((((-1.0 * alpha) * (1.0 - (xx * xx)))
* (1.0 - (yy * yy))) - (2.0 * (1.0 - (xx * xx))))
- (2.0 * (1.0 - (yy * yy))));
}
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* Compute and print the normalized L2 error between the numerical solution
 * u and the exact solution (1-x^2)*(1-y^2) on the [-1,1]x[-1,1] grid.
 * alpha and f_p are accepted for signature symmetry but not used in the
 * error computation itself. */
void error_check(int n, int m, REAL alpha, REAL dx, REAL dy, REAL *u_p, REAL *f_p) {
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
REAL error = 0.0;
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
REAL xx = (-1.0 + (dx * (i - 1)));
REAL yy = (-1.0 + (dy * (j - 1)));
REAL diff = (u[i][j] - ((1.0 - (xx * xx)) * (1.0 - (yy * yy))));
error = (error + (diff * diff));
}
}
error = (sqrt(error) / (n * m));
printf("Solution Error: %2.6g\n", error);
}
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL relax, REAL *u_p, REAL *f_p, REAL tol, int mits);
/* Driver for the Jacobi solver benchmark.
 *
 * Parses optional positional arguments, warms up both solvers, then times
 * num_runs runs each of the sequential and OpenMP variants and reports the
 * average time per run plus the final solution error.
 *
 * Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]
 * With a single argument the grid is square (m = n); extra args ignored.
 */
int main(int argc, char *argv[]) {
int n = DEFAULT_DIMSIZE;
int m = DEFAULT_DIMSIZE;
REAL alpha = 0.0543;
REAL tol = 0.0000000001;
REAL relax = 1.0;
int mits = 5000;
/* Positional arguments: each later one requires all earlier ones.
 * (Cascaded if-ranges replace the original argc switch ladder.) */
if (argc >= 2) {
sscanf(argv[1], "%d", &n);
m = n;
}
if (argc >= 3)
sscanf(argv[2], "%d", &m);
if (argc >= 4)
sscanf(argv[3], "%g", &alpha);
if (argc >= 5)
sscanf(argv[4], "%g", &tol);
if (argc >= 6)
sscanf(argv[5], "%g", &relax);
if (argc >= 7)
sscanf(argv[6], "%d", &mits);
printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
printf("------------------------------------------------------------------------------------------------------\n");
/* Allocate the grids; allocation failures were previously unchecked. */
REAL *u = (REAL *) malloc(sizeof(REAL) * n * m);
REAL *uomp = (REAL *) malloc(sizeof(REAL) * n * m);
REAL *f = (REAL *) malloc(sizeof(REAL) * n * m);
if (u == NULL || uomp == NULL || f == NULL) {
fprintf(stderr, "jacobi: out of memory allocating %d x %d grids\n", n, m);
free(u);
free(uomp);
free(f);
return 1;
}
REAL dx; /* grid spacing in x direction */
REAL dy; /* grid spacing in y direction */
const int num_runs = 20; /* runs averaged for each timing */
initialize(n, m, alpha, &dx, &dy, u, f);
memcpy(uomp, u, sizeof(REAL) * n * m);
/* Warm up both solvers so timing excludes first-touch/startup effects. */
jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
/* Reset inputs for the timed runs. */
initialize(n, m, alpha, &dx, &dy, u, f);
memcpy(uomp, u, sizeof(REAL) * n * m);
double elapsed = 0;
for (int run = 0; run < num_runs; run++) { /* was a hard-coded 20 */
double start = read_timer_ms();
jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
elapsed += read_timer_ms() - start;
}
printf("seq elapsed time(ms): %4f\n", elapsed / num_runs);
puts("================");
double elapsed2 = 0;
for (int run = 0; run < num_runs; run++) { /* was a hard-coded 20 */
double start = read_timer_ms();
jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
elapsed2 += read_timer_ms() - start;
}
printf("OpenMP elapsed time(ms): %4f\n", elapsed2 / num_runs);
error_check(n, m, alpha, dx, dy, u, f);
free(u);
free(f);
free(uomp);
return 0;
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* mits Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Sequential Jacobi iteration for the Helmholtz problem (see the header
 * comment above this function for the full problem description).
 * Iterates until the normalized residual drops below tol or mits
 * iterations have been performed. u_p is updated in place. */
void jacobi_seq(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
int i, j, k;
REAL error;
REAL ax;
REAL ay;
REAL b;
REAL resid;
/* uold was a stack VLA of n*m REALs, which can overflow the stack for
 * large grids; allocate it on the heap instead (as jacobi_omp does). */
REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
if (tmp == NULL) {
fprintf(stderr, "jacobi_seq: out of memory\n");
return;
}
REAL (*uold)[m] = (REAL (*)[m]) tmp;
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
/* Initialize coefficients. */
ax = (1.0 / (dx * dx)); /* X-direction coef */
ay = (1.0 / (dy * dy)); /* Y-direction coef */
b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha); /* central coeff */
error = (10.0 * tol); /* force at least one iteration */
k = 1;
while ((k <= mits) && (error > tol)) {
error = 0.0;
/* Copy new solution into old. */
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
uold[i][j] = u[i][j];
/* Relax the interior points and accumulate the squared residual;
 * boundary rows/columns hold the Dirichlet boundary values. */
for (i = 1; i < (n - 1); i++)
for (j = 1; j < (m - 1); j++) {
resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
b * uold[i][j] - f[i][j]) / b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
error = sqrt(error) / (n * m);
k = k + 1;
} /* End iteration loop */
/* NOTE: k is one past the last completed iteration (same as jacobi_omp). */
printf("Total Number of Iterations: %d\n", k);
printf("Residual: %.15g\n", error);
free(tmp);
}
/* OpenMP SIMD variant of jacobi_seq: identical numerics, but the inner j
 * loops are vectorized with "omp simd", and the residual accumulation uses
 * a simd reduction clause. u_p is updated in place. */
void jacobi_omp(int n, int m, REAL dx, REAL dy, REAL alpha, REAL omega, REAL *u_p, REAL *f_p, REAL tol, int mits) {
int i, j, k;
REAL error;
REAL ax;
REAL ay;
REAL b;
REAL resid;
// NOTE(review): malloc result is unchecked — dereferences NULL on OOM.
REAL *tmp = (REAL *) malloc(sizeof(REAL) * n * m);
REAL (*uold)[m] = (REAL (*)[m]) tmp;
REAL (*u)[m] = (REAL (*)[m]) u_p;
REAL (*f)[m] = (REAL (*)[m]) f_p;
/*
* Initialize coefficients */
/* X-direction coef */
ax = (1.0 / (dx * dx));
/* Y-direction coef */
ay = (1.0 / (dy * dy));
/* Central coeff */
b = (((-2.0 / (dx * dx)) - (2.0 / (dy * dy))) - alpha);
error = (10.0 * tol);
k = 1;
while ((k <= mits) && (error > tol)) {
error = 0.0;
//printf("===================== iteration %d ===========================\n", k);
/* Copy new solution into old */
for (i = 0; i < n; i++)
#pragma omp simd
for (j = 0; j < m; j++)
uold[i][j] = u[i][j];
/* Relax interior points; the inner loop is vectorized and the
 * residual sum is combined with a simd reduction. */
for (i = 1; i < (n - 1); i++)
#pragma omp simd reduction(+:error)
for (j = 1; j < (m - 1); j++) {
resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) +
b * uold[i][j] - f[i][j]) / b;
//printf("i: %d, j: %d, resid: %f\n", i, j, resid);
u[i][j] = uold[i][j] - omega * resid;
error = error + resid * resid;
}
/* Error check */
//if (k % 500 == 0)
// printf("Finished %d iteration with error: %g\n", k, error);
error = sqrt(error) / (n * m);
k = k + 1;
} /* End iteration loop */
/* NOTE: k is one past the last completed iteration (same as jacobi_seq). */
printf("Total Number of Iterations: %d\n", k);
printf("Residual: %.15g\n", error);
free(tmp);
}
|
GB_binop__bxnor_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int16)
// C=scalar+B GB (_bind1st__bxnor_int16)
// C=scalar+B' GB (_bind1st_tran__bxnor_int16)
// C=A+scalar GB (_bind2nd__bxnor_int16)
// C=A'+scalar GB (_bind2nd_tran__bxnor_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B with the bxnor_int16 op; all three matrices are dense.
// The included template loops over C applying GB_BINOP defined above.
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C += B where C is dense; uses the caller-computed task slicing of B.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxnor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns (generated-code artifact)
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
// eWiseUnion: entries missing from A use alpha, missing from B use beta
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxnor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C is sparse/hyper; the meta template handles the masked, complemented,
// and unmasked eWiseMult cases using the GB_BINOP macro defined above.
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is 0 for this operator, so only the unflipped branch
// below is compiled; the flipped branch exists for non-commutative ops.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// M is sparse/hyper and drives the iteration; A and B are bitmap/full.
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C is bitmap; the template dispatches on ewise_method and the mask form.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply Cx [p] = bxnor (x, Bx [p]) to every entry present in B, where x is
// a scalar bound as the first operand. Cx and Bx may be aliased.
GrB_Info GB (_bind1st__bxnor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Bx = (int16_t *) Bx_input ;
int16_t x = (*((int16_t *) x_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < bnz ; k++)
{
// only process entries present in B's bitmap
if (GBB (Bb, k))
{
int16_t bij = GBX (Bx, k, false) ;
Cx [k] = ~((x) ^ (bij)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply Cx [p] = bxnor (Ax [p], y) to every entry present in A, where y is
// a scalar bound as the second operand. Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__bxnor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t k = 0 ; k < anz ; k++)
{
// only process entries present in A's bitmap
if (GBB (Ab, k))
{
int16_t aij = GBX (Ax, k, false) ;
Cx [k] = ~((aij) ^ (y)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// x is the bound first operand; GB_CAST_OP (redefined just above this
// function) computes cij = bxnor (x, aij) while transposing A into C.
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// y is the bound second operand; GB_CAST_OP (redefined just above this
// function) computes cij = bxnor (aij, y) while transposing A into C.
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_uint16
// op(A') function: GB_tran__ainv_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_uint16
(
int16_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = -(int16_t) Ax [p] for all anz entries, in parallel; the cast
// and negation come from GB_CASTING and GB_OP via GB_CAST_OP above.
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the AINV operator.
GrB_Info GB_tran__ainv_int16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
// restrict added for consistency with the other generated GB_tran__*
// functions (e.g. GB_tran__abs_bool_int64); top-level parameter
// qualifiers do not affect callers.
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of the transpose: fill C with casted, negated values of A'
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__abs_bool_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_bool_int64
// op(A') function: GB_tran__abs_bool_int64
// C type: bool
// A type: int64_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_bool_int64
(
bool *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx [p] = (bool) Ax [p] for all anz entries, in parallel; for a bool
// output the ABS operator reduces to the cast itself (GB_OP is z = x).
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_bool_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// phase 2 of the transpose: fill C with values of A' cast to bool
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__ge_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__ge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint32)
// A*D function (colscale): GB (_AxD__ge_uint32)
// D*A function (rowscale): GB (_DxB__ge_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint32)
// C=scalar+B GB (_bind1st__ge_uint32)
// C=scalar+B' GB (_bind1st_tran__ge_uint32)
// C=A+scalar GB (_bind2nd__ge_uint32)
// C=A'+scalar GB (_bind2nd_tran__ge_uint32)
// C type: bool
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT32 || GxB_NO_GE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A+B with the ge_uint32 op; all three matrices are dense.
// The included template loops over C applying GB_BINOP defined above.
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the subassign template is compiled out (#if 0) for this operator
// (C is bool but A/B are uint32_t), so this variant does no work and
// reports success; the generic path handles this case instead.
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ge_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the subassign template is compiled out (#if 0) for this operator
// (C is bool but b is uint32_t), so this variant does no work and
// reports success; the generic path handles this case instead.
#if 0
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// GE is a comparator, so the output entries of C are bool (hence the
// bool cast of C->x); the loop body comes from the colscale template.
GrB_Info GB (_AxD__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
// Mirror image of _AxD above; C's values are bool (comparator output).
GrB_Info GB (_DxB__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, over the set union of the
// patterns of A and B.  When is_eWiseUnion is true, entries present in only
// one input are paired with the alpha/beta scalars instead of being copied.
// The heavy lifting (task scheduling, all sparsity formats) is in
// GB_add_template.c; this wrapper just unpacks the typed scalars.
GrB_Info GB (_AaddB__ge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// typed copies of the eWiseUnion fill-in scalars (unused otherwise)
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked), where C is computed as
// sparse or hypersparse over the set intersection of the patterns of A and B.
// All logic lives in GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__ge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hypersparse and B is
// bitmap/full.  flipxy swaps the operands for non-commutative operators that
// have no pre-flipped variant; GB_BINOP_FLIP selects at compile time whether
// that runtime branch is needed at all.
GrB_Info GB (_AemultB_02__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant.  For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef  GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full, iterating over the entries of M.
GrB_Info GB (_AemultB_04__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C = A.*B, C<M> = A.*B, or C<!M> = A.*B.
// Used when the result is best held as a bitmap; body in the shared template.
GrB_Info GB (_AemultB_bitmap__ge_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x >= b) across all entries of B, with the
// scalar bound to the first argument.  Entries absent from B's bitmap
// (Bb) are skipped; the work is split statically across nthreads.
GrB_Info GB (_bind1st__ge_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
bool *restrict Zx = (bool *) Cx_output ;
const uint32_t xval = (*((const uint32_t *) x_input)) ;
const uint32_t *restrict Bvals = (uint32_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip holes in the bitmap of B
if (!GBB (Bb, k)) continue ;
uint32_t bval = GBX (Bvals, k, false) ;
Zx [k] = (xval >= bval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (a >= y) across all entries of A, with the
// scalar bound to the second argument.  Entries absent from A's bitmap
// (Ab) are skipped; the work is split statically across nthreads.
GrB_Info GB (_bind2nd__ge_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped input/output arrays
bool *restrict Zx = (bool *) Cx_output ;
const uint32_t *restrict Avals = (uint32_t *) Ax_input ;
const uint32_t yval = (*((const uint32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip holes in the bitmap of A
if (!GBB (Ab, k)) continue ;
uint32_t aval = GBX (Avals, k, false) ;
Zx [k] = (aval >= yval) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply z = (x >= aij) to each entry.
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef  GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code generated after this function
#undef  GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply z = (aij >= y) to each entry.
// GB_CAST_OP is the per-entry kernel consumed by GB_unop_transpose.c.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
log.h | /**
* Copyright (c) 2017 rxi
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the MIT license. See `log.c` for details.
*/
#pragma once
#ifdef USE_LOG
#include <cstdio>
#include <cstdarg>
#include <cstring>
#include <string>
#define LOG_VERSION "0.1.0"
typedef void (*log_LockFn)(void *udata, int lock);
enum {
LOG_TRACE, LOG_DEBUG, LOG_INFO, LOG_WARN, LOG_ERROR, LOG_FATAL
};
#define __FILENAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define RAPIDS_FILE (__FILENAME__)
#define log_trace(...) log_log(LOG_TRACE, RAPIDS_FILE, __FUNCTION__, __LINE__, __VA_ARGS__)
#define log_debug(...) log_log(LOG_DEBUG, RAPIDS_FILE, __FUNCTION__, __LINE__, __VA_ARGS__)
#define log_info(...) log_log(LOG_INFO, RAPIDS_FILE, __FUNCTION__, __LINE__, __VA_ARGS__)
#define log_warn(...) log_log(LOG_WARN, RAPIDS_FILE, __FUNCTION__, __LINE__, __VA_ARGS__)
#define log_error(...) log_log(LOG_ERROR, RAPIDS_FILE, __FUNCTION__, __LINE__, __VA_ARGS__)
#define log_fatal(...) log_log(LOG_FATAL, RAPIDS_FILE, __FUNCTION__, __LINE__, __VA_ARGS__)
void log_set_udata(void *udata);
void log_set_lock(log_LockFn fn);
void log_set_fp(FILE *fp);
void log_set_level(int level);
void log_set_quiet(int enable);
void log_log(int level, const char *file, const char *func, int line, const char *fmt, ...);
// Log `str` via log_info, at most once per thread (thread_local guard).
// NOTE(review): DEBUG is #define'd unconditionally right here, so the
// #ifdef below is always true -- presumably a leftover debug toggle.
// NOTE(review): `omp single` only has effect when called from inside a
// parallel region; combined with the per-thread flag this looks intended
// to print exactly once overall -- confirm against callers.
inline void print_str(std::string str) {
#define DEBUG
#ifdef DEBUG
static thread_local bool is_first = true;
if (is_first) {
#pragma omp single
log_info("%s", str.c_str());
is_first = false;
}
#endif
}
#else //use log
#define log_trace(...)
#define log_debug(...)
#define log_info(...)
#define log_warn(...)
#define log_error(...)
#define log_fatal(...)
#endif //use log |
bgmgraphgrid2D.h | /** @file bgmgraphgrid2D.h
* @brief class for manipulation with 2D grid
*
* Defines some basic functions for manipulaton with grids, especially for BLOCKGRAPHMATRIX.
*
* @author Lukas Pospisil
*/
#ifndef PASC_COMMON_BGMGRAPHGRID2D_H
#define PASC_COMMON_BGMGRAPHGRID2D_H
#include "general/algebra/graph/bgmgraph.h"
namespace pascinference {
namespace algebra {
/** \class BGMGraphGrid2D
* \brief Graph of two dimensional grid.
*
* Could be used for faster and simplier manipulation with image graph.
*
*/
template<class VectorBase>
class BGMGraphGrid2D: public BGMGraph<VectorBase> {
	public:
		class ExternalContent;          /**< opaque backend-specific data (e.g. GPU state) */
	protected:
		friend class ExternalContent;
		ExternalContent *externalcontent;			/**< for manipulation with external-specific stuff */
		int width; /**< horizontal dimension of grid (number of columns) */
		int height; /**< vertical dimension of grid (number of rows) */
		/** CUDA variant of process_grid(); implemented in backend sources */
		void process_grid_cuda();
	public:
		/** build a width x height grid graph (adjacency filled by process_grid) */
		BGMGraphGrid2D(int width, int height);
		/** load grid graph from file of 2D coordinates */
		BGMGraphGrid2D(std::string filename, int dim=2) : BGMGraph<VectorBase>(filename, dim) {};
		/** wrap an existing coordinate array */
		BGMGraphGrid2D(const double *coordinates_array, int n, int dim) : BGMGraph<VectorBase>(coordinates_array, n, dim) {};
		~BGMGraphGrid2D();
		virtual std::string get_name() const;
		/** build the 4-neighbour adjacency structure of the grid */
		virtual void process_grid();
		int get_width() const;
		int get_height() const;
		/** derive this (coarser) grid's domain decomposition from an already
		 *  decomposed finer grid; also returns local bounding boxes */
		void decompose(BGMGraphGrid2D<VectorBase> *finer_grid, int *bounding_box1, int *bounding_box2);
		ExternalContent *get_externalcontent() const;
};
}
} /* end of namespace */
/* -------- IMPLEMENTATION ------- */
namespace pascinference {
namespace algebra {
template<class VectorBase>
BGMGraphGrid2D<VectorBase>::BGMGraphGrid2D(int width, int height) : BGMGraph<VectorBase>(){
	LOG_FUNC_BEGIN

	/* a width x height regular grid: dimension 2, one node per grid point */
	this->width = width;
	this->height = height;

	this->dim = 2;
	this->n = width*height;

	/* NOTE(review): node coordinates are not generated here (the original
	 * TODO below); the graph is usable for adjacency only after
	 * process_grid() has been called. */
	//TODO

	LOG_FUNC_END
}
template<class VectorBase>
BGMGraphGrid2D<VectorBase>::~BGMGraphGrid2D(){
	LOG_FUNC_BEGIN

	/* nothing grid-specific to release; adjacency arrays are presumably
	 * freed by the BGMGraph base destructor -- verify in bgmgraph.h */

	LOG_FUNC_END
}
template<class VectorBase>
void BGMGraphGrid2D<VectorBase>::process_grid(){
	LOG_FUNC_BEGIN

	/* Build the 4-neighbour (von Neumann) adjacency of the width x height
	 * grid: node idx = i*width + j is linked to its existing left/right/
	 * down/up neighbours.  Fills neighbor_nmbs[] and neighbor_ids[][]. */
	this->threshold = 1.1;
	this->m = height*(width-1) + width*(height-1); /* number of grid edges */
	this->m_max = 4;                               /* maximum node degree */

	/* prepare array for number of neighbors */
	this->neighbor_nmbs = (int*)malloc(this->n*sizeof(int));
	this->neighbor_ids = (int**)malloc(this->n*sizeof(int*));

//	#pragma omp parallel for
	for(int idx=0;idx<width*height;idx++){
		/* FIX: use exact integer division for the row index; the original
		 * computed idx/(double)width and truncated, which gives the same
		 * result for these non-negative operands but is slower and
		 * needlessly exposed to floating-point rounding. */
		int i = idx/width;     /* index of row */
		int j = idx - i*width; /* index of column */

		/* compute number of neighbors */
		int nmb = 0;
		if(j>0){
			nmb+=1;
		}
		if(j<width-1){
			nmb+=1;
		}
		if(i>0){
			nmb+=1;
		}
		if(i<height-1){
			nmb+=1;
		}
		this->neighbor_nmbs[idx] = nmb;
		this->neighbor_ids[idx] = (int*)malloc(this->neighbor_nmbs[idx]*sizeof(int));

		/* fill neighbors */
		nmb = 0;
		if(j>0){ /* left */
			this->neighbor_ids[idx][nmb] = idx-1;
			nmb+=1;
		}
		if(j<width-1){ /* right */
			this->neighbor_ids[idx][nmb] = idx+1;
			nmb+=1;
		}
		if(i>0){ /* down */
			this->neighbor_ids[idx][nmb] = idx-width;
			nmb+=1;
		}
		if(i<height-1){ /* up */
			this->neighbor_ids[idx][nmb] = idx+width;
			nmb+=1;
		}
	}

	this->processed = true;

	LOG_FUNC_END
}
template<class VectorBase>
std::string BGMGraphGrid2D<VectorBase>::get_name() const {
	/* human-readable class name used in info/log output */
	return std::string("BGMGraphGrid2D");
}
template<class VectorBase>
int BGMGraphGrid2D<VectorBase>::get_width() const {
	/* accessor: number of grid columns */
	return width;
}
template<class VectorBase>
int BGMGraphGrid2D<VectorBase>::get_height() const {
	/* accessor: number of grid rows */
	return height;
}
template<class VectorBase>
void BGMGraphGrid2D<VectorBase>::decompose(BGMGraphGrid2D<VectorBase> *finer_grid, int *bounding_box1, int *bounding_box2) {
	LOG_FUNC_BEGIN

	/* Project the domain decomposition of an already-decomposed finer grid
	 * onto this coarser grid: each coarse node is assigned to the domain
	 * that owns the majority of finer-grid nodes in its footprint.  Also
	 * computes the local bounding box of this rank's nodes in finer-grid
	 * coordinates (bounding_box1) and coarse coordinates (bounding_box2). */
	//TODO: if finer_grid is not decomposed, then give error

	/* from finer grid decomposition create the decomposition of this graph */
	this->DD_decomposed = true;
	this->DD_size = finer_grid->get_DD_size();

	/* allocate arrays */
	this->DD_affiliation = (int*)malloc(this->n*sizeof(int)); /* domain id */
	this->DD_permutation = (int*)malloc(this->n*sizeof(int)); /* Rorig = P(Rnew) */
	this->DD_invpermutation = (int*)malloc(this->n*sizeof(int)); /* Rnew = Pinv(Rorig) */
	this->DD_lengths = (int*)malloc(this->DD_size*sizeof(int)); /* array of local lengths */
	this->DD_ranges = (int*)malloc((this->DD_size+1)*sizeof(int)); /* ranges in permutation array */

	/* we use DD_lengths as a counters for domains */
	set_value_array(this->DD_size, this->DD_lengths, 0);

	/* pointer to original domain ids */
	int *DD_affiliation1 = finer_grid->get_DD_affiliation();

	int id_x1, id_y1; /* coordinates in finer grid */
	int id_x2, id_y2; /* coordinates in coarser grid */

	/* dimensions of the grid */
	int width1 = finer_grid->get_width();
	int height1 = finer_grid->get_height();
	int width2 = width;
	int height2 = height;

	/* scale factors between the two grids (coarse step in fine units) */
	double diff_x = (width1 - 1)/(double)(width2 - 1);
	double diff_y = (height1 - 1)/(double)(height2 - 1);

	double center_x1, left_x1, right_x1;
	double center_y1, left_y1, right_y1;

	/* counter of affiliations in corresponding part in finer grid */
	/* NOTE(review): variable-length array -- not standard C++; consider
	 * std::vector if DD_size can be large */
	int DD_affiliation1_sub[this->DD_size];
	int x1_min, x1_max, y1_min, y1_max;

	/* start the bounding box as empty (+inf / -inf sentinels) */
	bounding_box1[0] = std::numeric_limits<int>::max(); /* inf */
	bounding_box1[1] = (-1)*std::numeric_limits<int>::max(); /* -inf */
	bounding_box1[2] = std::numeric_limits<int>::max(); /* inf */
	bounding_box1[3] = (-1)*std::numeric_limits<int>::max(); /* -inf */

	for(int i=0;i<this->n;i++){
		/* coarse-grid coordinates of node i */
		id_y2 = floor(i/((double)width2));
		id_x2 = i - id_y2*width2;

		/* footprint of this coarse node in fine coordinates */
		center_x1 = id_x2*diff_x;
		left_x1 = center_x1 - diff_x;
		right_x1 = center_x1 + diff_x;

		center_y1 = id_y2*diff_y;
		left_y1 = center_y1 - diff_y;
		right_y1 = center_y1 + diff_y;

		/* find the affiliation of this point */
		set_value_array(this->DD_size, DD_affiliation1_sub, 0);
		x1_min = floor(left_x1);
		x1_max = floor(right_x1);
		y1_min = floor(left_y1);
		y1_max = floor(right_y1);
		/* tally the domain ids of all fine nodes inside the footprint */
		for(id_x1 = x1_min; id_x1 <= x1_max; id_x1++){
			for(id_y1 = y1_min; id_y1 <= y1_max; id_y1++){
				if(id_x1 >= 0 && id_x1 < width1 && id_y1 >= 0 && id_y1 < height1){
					DD_affiliation1_sub[DD_affiliation1[id_y1*width1 + id_x1]] += 1;
				}
			}
		}

		/* set the right affiliation of this point (majority vote) */
		this->DD_affiliation[i] = max_arg_array(this->DD_size, DD_affiliation1_sub);

		/* increase the counters of nodes in each domain */
		this->DD_lengths[this->DD_affiliation[i]] += 1;

		/* set new values of local bounding box (only for my own rank) */
		if(this->DD_affiliation[i] == GlobalManager.get_rank()){
			if(x1_min < bounding_box1[0] && x1_min >= 0) bounding_box1[0] = x1_min;
			if(x1_max > bounding_box1[1] && x1_max < width1) bounding_box1[1] = x1_max;
			if(y1_min < bounding_box1[2] && y1_min >= 0) bounding_box1[2] = y1_min;
			if(y1_max > bounding_box1[3] && y1_max < height1) bounding_box1[3] = y1_max;
		}
	}

	/* prepare ranges and zero DD_lengths - it will be used as counters in next step */
	this->DD_ranges[0] = 0;
	for(int i=1;i < this->DD_size+1; i++){
		this->DD_ranges[i] = this->DD_ranges[i-1] + this->DD_lengths[i-1];
		this->DD_lengths[i-1] = 0;
	}

	/* prepare permutation arrays (counting sort by domain id) */
	for(int i=0;i<this->n;i++){
		this->DD_invpermutation[i] = this->DD_ranges[this->DD_affiliation[i]] + this->DD_lengths[this->DD_affiliation[i]];
		this->DD_permutation[this->DD_ranges[this->DD_affiliation[i]] + this->DD_lengths[this->DD_affiliation[i]]] = i;

		this->DD_lengths[this->DD_affiliation[i]] += 1;
	}

	/* compute bounding_box2 from bounding_box1 */
	bounding_box2[0] = floor(bounding_box1[0]/diff_x);
	bounding_box2[1] = floor(bounding_box1[1]/diff_x);
	bounding_box2[2] = floor(bounding_box1[2]/diff_y);
	bounding_box2[3] = floor(bounding_box1[3]/diff_y);

	LOG_FUNC_END
}
}
} /* end of namespace */
#endif
|
cf_openmp.c | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <time.h>
#include <sys/time.h>
#define MICRO_IN_SEC 1000000.00
// the item number and user number should be start from 1, if it start from 0, our program in get_*_data should be modified
#define USER_COUNT 384546
//#define USER_COUNT 3500
#define ITEM_COUNT 1019318
#define TEST_COUNT 19315653
#define RECORD_COUNT 29057933
#define ITEM_BLOCK 3500
//#define ITEM_BLOCK 384546
#define ITEM_ROUND (ITEM_COUNT/ITEM_BLOCK)
#define ITEM_LAST (ITEM_COUNT%ITEM_BLOCK)
#define USER_BLOCK 6000
#define USER_ROUND (USER_COUNT/USER_BLOCK)
#define USER_LAST (USER_COUNT%USER_BLOCK)
#define K_SORT 100
typedef struct record_item_struct_define
{
int item_id;
int user_id;
float rating;
}record_item_struct;
typedef struct k_similarity_struct_define
{
int k_index;
double k_similarity;
}k_similarity_struct;
// Return the current wall-clock time in seconds, with microsecond
// resolution, as a double.  Suitable for coarse interval timing (it is
// not a monotonic clock).
// FIX: removed the unused locals (tv_sec, tv_usec, time) and the obsolete
// struct timezone argument to gettimeofday().
double microtime(){
	const double usec_per_sec = 1000000.00;   /* == MICRO_IN_SEC */
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec + tv.tv_usec/usec_per_sec;
}
// Here, we assume that the item in {user,item,predict_rating} of the test dataset will cover all the items.
// Compute blockwise item-item similarities between the item block of
// `rating_m` starting at start_index_m and the block of `rating_n` starting
// at start_index_n (both item-major, USER_COUNT floats per item).  The
// ratings were mean-centered by get_item_average_matrix, so the formula
// below is the cosine similarity of the centered vectors (i.e. Pearson).
// NOTE(review): the top-K recording code is commented out, the _mm_malloc
// scratch buffers are never freed, and the function calls exit(0) after
// timing the first block pair -- as written this is a benchmarking harness,
// not a complete similarity pass.
// NOTE(review): in the "#pragma omp parallel for" below, only `mn` is
// privatized; the function-scope scalars (m, n, block_m, block_n,
// sum_numerator, ...) are SHARED across threads -- a data race that must be
// fixed (declare them inside the loop) before the results are used.
void get_pearson_similarity(float* rating_m, float* rating_n, double* average_matrix, k_similarity_struct * k_similarity_matrix, int start_index_m, int start_index_n, int flag){
printf(" in pearson_similarity start_index_m=%d, start_index_n=%d\n", start_index_m, start_index_n);
int nPadded = ( USER_COUNT%8 == 0 ? USER_COUNT : USER_COUNT + (8-USER_COUNT%8) );  // unused (vectorization leftover)
double similarity;
int i,j,m,n,s,k;
int ua,ub,uc;  // unused
float sum_numerator=0;
float sum_denominator_square_m=0;
float sum_denominator_square_n=0;
float sum_denominator=0;
// scratch arrays (currently unused by the active code path; never freed)
double* sum_numerator_matrix =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
double* sum_denominator_matrix_m =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
double* sum_denominator_matrix_n =(double*) _mm_malloc(sizeof(double)*USER_COUNT,64);
memset(sum_numerator_matrix,0,sizeof(double)*USER_COUNT);
memset(sum_denominator_matrix_m,0,sizeof(double)*USER_COUNT);
memset(sum_denominator_matrix_n,0,sizeof(double)*USER_COUNT);
// float * simi_temp = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
int numthreads;
int item_index;
int block_m,block_n;
// clamp block ends to the matrix dimension (last block may be short)
int end_m=(ITEM_COUNT<(start_index_m+ITEM_BLOCK) ? ITEM_COUNT:(start_index_m+ITEM_BLOCK));
int end_n=(ITEM_COUNT<(start_index_n+ITEM_BLOCK) ? ITEM_COUNT:(start_index_n+ITEM_BLOCK));
// NOTE(review): called outside a parallel region this always prints 1
printf("the number of threads is %d\n", omp_get_num_threads());
printf(" end_m = %d , end_n = %d\n",end_m,end_n);
double a = microtime();
//compute the pearson similarity
//#pragma omp parallel for collapse(2) private(i,j,k,m,n) reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
//#pragma omp parallel
unsigned int mn;
// single flattened loop over all (m,n) item pairs of the two blocks
#pragma omp parallel for
// for (mn = 0; mn < (((end_m-start_index_m)*(end_n-start_index_n))/16)*16; mn++)
for (mn = 0; mn < (end_m-start_index_m)*(end_n-start_index_n); mn++)
// for (m=start_index_m; m < end_m; m++)
{
// if (m%100==0) printf ("m = %d, percent= %f/%\n",m,(float)m/ITEM_COUNT*100);
// for(n=start_index_n; n< end_n; n++)
// {
// recover the 2-D pair indices from the flattened index
m = start_index_m + mn / ( end_n - start_index_n );
n = start_index_n + mn % ( end_n - start_index_n );
block_m = m - start_index_m;
block_n = n - start_index_n;
// block_m = block_n =0;
sum_numerator=0;
sum_denominator_square_m=0;
sum_denominator_square_n=0;
sum_denominator=0;
//#pragma omp for schedule(static) nowait
// #pragma omp parallel for
// #pragma simd reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n) novecremainder
#pragma simd reduction(+:sum_numerator,sum_denominator_square_m,sum_denominator_square_n)
// #pragma vector aligned
//#pragma noprefetch
// #pragma ivdep
// #pragma vector always
for (i=0;i < USER_COUNT; i++)
{
//compute numerator
sum_numerator += (rating_m[block_m*USER_COUNT+i])*(rating_n[block_n*USER_COUNT+i]);
// simi_temp[block_m*USER_COUNT+i]= rating_m[block_m*USER_COUNT+i];
//compute the squre in denominator
sum_denominator_square_m += powf((rating_m[block_m*USER_COUNT+i]),2.0);
// sum_denominator_square_m += rating_m[block_m*USER_COUNT+i]*rating_m[block_m*USER_COUNT+i];
sum_denominator_square_n += powf((rating_n[block_n*USER_COUNT+i]),2.0);
// sum_denominator_square_n += rating_n[block_n*USER_COUNT+i]*rating_n[block_n*USER_COUNT+i];
// if( m==100 && n==100)
// printf("m=%d,n=%d,i=%d, running on thread %d\n",m,n,i, omp_get_thread_num());
}
/*
float * rating_m_block = rating_m+block_m*USER_COUNT;
float * rating_n_block = rating_n+block_n*USER_COUNT;
// float sum_numerator, sum_denominator_square_m, sum_denominator_square_n;
// #pragma vector aligned (rating_m_block,rating_n_block)
float sum_numerator = __sec_reduce_add(rating_m_block[0:USER_COUNT] * rating_n_block[0:USER_COUNT]);
// float sum_denominator_square_m = __sec_reduce_add(rating_m_block[0:USER_COUNT] * rating_m_block[0:USER_COUNT]);
// #pragma vector aligned (rating_m_block)
float sum_denominator_square_m = __sec_reduce_add(rating_m_block[0:USER_COUNT]*rating_m_block[0:USER_COUNT]);
// #pragma vector aligned (rating_n_block)
float sum_denominator_square_n = __sec_reduce_add(rating_n_block[0:USER_COUNT]*rating_n_block[0:USER_COUNT]);
*/
//compute the denominator
sum_denominator = sqrt (sum_denominator_square_m*sum_denominator_square_n);
// guard against all-zero vectors (no common support)
if(sum_denominator!=0)
similarity = sum_numerator/sum_denominator;
else
similarity = 0;
/*
for (j=0;j<K_SORT;j++)
{
item_index = k_similarity_matrix[m*K_SORT+j].k_index;
if(item_index==-1 || similarity > k_similarity_matrix[m*K_SORT+j].k_similarity)
{
for (s=K_SORT-1;s>j;s--)
{
k_similarity_matrix[m*K_SORT+s].k_index = k_similarity_matrix[m*K_SORT+s-1].k_index;
k_similarity_matrix[m*K_SORT+s].k_similarity = k_similarity_matrix[m*K_SORT+s-1].k_similarity;
}
k_similarity_matrix[m*K_SORT+j].k_index = n;
k_similarity_matrix[m*K_SORT+j].k_similarity = similarity;
break;
}
else if( similarity == k_similarity_matrix[m*K_SORT+j].k_similarity && item_index == n)
{
break;
}
}
if(flag==0) continue;
for (k=0;k<K_SORT;k++)
{
item_index = k_similarity_matrix[n*K_SORT+k].k_index;
if(item_index==-1 || similarity > k_similarity_matrix[n*K_SORT+k].k_similarity)
{
for (s=K_SORT-1;s>j;s--)
{
k_similarity_matrix[n*K_SORT+s].k_index = k_similarity_matrix[n*K_SORT+s-1].k_index;
k_similarity_matrix[n*K_SORT+s].k_similarity = k_similarity_matrix[n*K_SORT+s-1].k_similarity;
}
k_similarity_matrix[n*K_SORT+k].k_index = m;
k_similarity_matrix[n*K_SORT+k].k_similarity = similarity;
break;
}
else if( similarity == k_similarity_matrix[n*K_SORT+k].k_similarity && item_index == m)
{
break;
}
}
*/
// }
}
double b = microtime();
double duration = b-a;
printf(" time consumed: %fs\n", duration);
// NOTE(review): this terminates the whole program after the first block
// pair -- main's loops over rounds never proceed past the first call.
exit(0);
}
// Predict the ratings for the slice of the test set whose users fall in the
// current user block [user_start_index, user_start_index+USER_BLOCK).
// For each test (user,item), the prediction is the item's mean plus the
// similarity-weighted deviation of the user's ratings on the K_SORT most
// similar items.  `rating` is the user-major block, `average_index` the
// per-item means.  Returns the index of the first test record NOT handled,
// so the caller can resume with the next user block.
// NOTE(review): index_matrix entries are initialized to -1; if an item has
// fewer than K_SORT neighbours, k_id can be -1 and the rating[] index below
// goes out of bounds -- confirm the similarity pass always fills all slots.
int get_predict_rating(double* predict_rating, float* rating, k_similarity_struct* index_matrix, int* test, double* average_index, int user_start_index,int test_start_index)
{
// Firstly, we should find the union set between rating&index_matrix for the users in test[];
printf(" in predict_rating ...........user_start_index=%d, test_start_index=%d\n", user_start_index, test_start_index);
int m,n,i,j;
int user_id,item_id,k_id;
double sum_rating = 0;
int sum_rating_index = 0;
int sum_no_rating_index=0;
double numerator = 0;
double denominator = 0;
int block_user_id;
// #pragma omp parallel for private(m,i) reduction(+:numerator,denominator)
for (m = test_start_index; m < TEST_COUNT; m++)
{
user_id = test[m*2+0];
item_id = test[m*2+1];
numerator =0;
denominator = 0;
// test records are assumed sorted by user; a user below the block start
// indicates the caller's bookkeeping went wrong
if( user_id < user_start_index) printf(" error__________________+++++++++++++++\n");
// stop at the first user beyond this block; resume here next call
if( user_id > ((user_start_index+USER_BLOCK) > USER_COUNT ? USER_COUNT:(user_start_index+USER_BLOCK))) break;
block_user_id = user_id - user_start_index;
for (i = 0; i < K_SORT; i++)
{
k_id = index_matrix[item_id*K_SORT+i].k_index;
// only neighbours this user actually rated contribute
if ( rating[block_user_id*ITEM_COUNT+k_id] !=0 )
{
numerator += index_matrix[item_id*K_SORT+i].k_similarity*(rating[block_user_id*ITEM_COUNT+k_id]-average_index[k_id]);
denominator += index_matrix[item_id*K_SORT+i].k_similarity;
}
}
// fall back to the item mean when no similar item was rated
if(denominator !=0)
predict_rating[m] = average_index[item_id] + numerator/ denominator;
else
predict_rating[m] = average_index[item_id];
}
// return predict_rating;
return m;
}
// Diagnostic: count the zero entries of a row x column matrix and print the
// count together with the row count.  (Element printing is intentionally
// disabled; only the summary line is emitted.)
void print_matrix_double(double * matrix,int row,int column)
{
	int r, c;
	int zeros = 0;
	for (r = 0; r < row; r++)
	{
		for (c = 0; c < column; c++)
		{
			if (matrix[r*column + c] == 0)
			{
				zeros++;
			}
		}
	}
	printf("sum_0 is %d in a whole %d\n", zeros, row);
}
// Load the whole training file (item-major lines of
// "item_id user_id rating timestamp") into item_data[], in file order.
// The caller must size item_data for at least RECORD_COUNT entries;
// timestamps are read and discarded.
// BUG FIX: the fourth fscanf argument had been mangled to "×tamp"
// (an HTML-entity corruption of "&timestamp"), which does not compile.
// Also stop when a line fails to parse (fscanf != 4) instead of only at
// EOF; with "!= EOF" a malformed line would loop forever.
void get_item_data(record_item_struct* item_data, char* filename)
{
FILE *fp;
if ((fp=fopen(filename,"r")) == NULL)
{
printf("cannot open this file");
exit(0);
}
int user_id, item_id, timestamp;
float rating=0;
int i=0;
while (fscanf(fp,"%d %d %f %d", &item_id, &user_id, &rating, &timestamp) == 4)
{
item_data[i].item_id = item_id;
item_data[i].user_id = user_id;
item_data[i].rating = rating;
i++;
}
fclose(fp);
}
// Densify one item block: zero `data` (ITEM_BLOCK x USER_COUNT) and fill it
// with the ratings of items [round*ITEM_BLOCK, (round+1)*ITEM_BLOCK) taken
// from the in-memory item_data[] array, starting at record file_start_index.
// Returns the index of the first record belonging to the NEXT block, so
// successive calls stream through item_data.
// NOTE(review): the scan loop has no upper bound against RECORD_COUNT; on
// the final block the break condition (item_id >= (round+1)*ITEM_BLOCK) is
// never met and the loop reads past the end of item_data -- it only works
// if the caller zero-filled the array and an out-of-range sentinel is hit.
int get_item_block_data(int round, float* data, int file_start_index, record_item_struct* item_data)
{
int i=0;
// parallel zero-fill of the block (ratings absent from the file stay 0)
#pragma omp parallel for
for (i = 0; i<ITEM_BLOCK*USER_COUNT;i++)
{
data[i]=0;
}
// memset(data, 0, sizeof(float)*ITEM_BLOCK*USER_COUNT);
int user_id, item_id;
float rating=0;
// records are item-sorted; copy until the first item beyond this block
for(i=file_start_index; ;i++)
{
item_id = item_data[i].item_id;
user_id = item_data[i].user_id;
rating = item_data[i].rating;
if ((item_id-1) >= (round+1)*ITEM_BLOCK) break;
// ids in the file are 1-based; the dense block is 0-based
data[(item_id-1-(round*ITEM_BLOCK))*USER_COUNT + user_id-1] = rating;
}
return i;
}
// Densify one user block: zero `data` (USER_BLOCK x ITEM_COUNT) and fill it
// with the ratings of users [round*USER_BLOCK, (round+1)*USER_BLOCK) read
// from "r1.train.raw" (lines of "user_id item_id rating timestamp"),
// starting at byte offset file_start_index.  Returns the byte offset just
// past the last accepted record, so successive calls stream the file.
// BUG FIX: the fourth fscanf argument had been mangled to "×tamp"
// (an HTML-entity corruption of "&timestamp"), which does not compile.
// Also: use SEEK_SET instead of the magic number 0, stop on malformed
// lines (fscanf != 4) rather than only at EOF, and drop dead commented
// code.
long get_user_block_data(int round, float* data, long file_start_index)
{
FILE *fp;
// ratings absent from the file stay 0
memset(data, 0, sizeof(float)*USER_BLOCK*ITEM_COUNT);
if ((fp=fopen("r1.train.raw","r")) == NULL)
{
printf("cannot open this file");
exit(0);
}
int user_id, item_id, timestamp;
float rating=0;
long file_offset = 0;
fseek(fp, file_start_index, SEEK_SET);
while (fscanf(fp,"%d %d %f %d", &user_id, &item_id, &rating, &timestamp) == 4)
{
// records are user-sorted: skip users before the block, stop after it
if ((user_id-1) < round*USER_BLOCK) continue;
if ((user_id-1) >= (round+1)*USER_BLOCK) break;
// ids in the file are 1-based; the dense block is 0-based
data[(user_id-1-(round*USER_BLOCK))*ITEM_COUNT + item_id-1] = rating;
file_offset = ftell(fp);
}
fclose(fp);
return file_offset;
}
// Read the test set "r1.test" (lines of "user_id item_id rating timestamp")
// into data[] (pairs of 0-based user,item ids) and rating[] (true ratings),
// in file order.  The caller sizes both arrays for TEST_COUNT records.
// BUG FIX: the fourth fscanf argument had been mangled to "×tamp"
// (an HTML-entity corruption of "&timestamp"), which does not compile.
// Also stop on malformed lines (fscanf != 4) rather than only at EOF.
void get_test_data(int* data, float* rating)
{
FILE *fp;
int i=0;
// NOTE(review): this clears only sizeof(int) == 4 bytes.  It is harmless
// because every entry is overwritten below, but it does not zero the
// whole array as presumably intended.
memset(data, 0, sizeof(int));
if ((fp=fopen("r1.test","r")) == NULL)
{
printf("cannot open this file");
exit(0);
}
int user_id, item_id, timestamp;
float rating_temp;
while (fscanf(fp,"%d %d %f %d", &user_id, &item_id, &rating_temp, &timestamp) == 4)
{
// file ids are 1-based; store them 0-based
data[i*2+0] = user_id-1;
data[i*2+1] = item_id-1;
rating[i] = rating_temp;
i++;
}
fclose(fp);
}
// Compute the root-mean-square error between the true test ratings and the
// predictions over all TEST_COUNT test records.
// BUG FIX: despite its name (and main's "rmse=" printout) the original
// returned the MEAN SQUARED error; take the square root of the mean.
// Also removed the unused locals m, n, j.
double get_rmse(float* test_rating, double* predict_data)
{
int i;
double sum_sq = 0;
#pragma omp parallel for private(i) reduction(+:sum_sq)
for(i=0;i<TEST_COUNT;i++)
{
sum_sq += pow ((test_rating[i] - predict_data[i]),2.0);
}
return sqrt (sum_sq / (double)TEST_COUNT);
}
// For one block of ITEM_BLOCK items (`rating` is item-major, USER_COUNT
// floats per item), compute each item's mean over its NONZERO ratings into
// average_matrix[item], then subtract that mean from every entry of the
// item's row (mean-centering for the subsequent cosine/Pearson pass).
// BUG FIX: in the original, block_m (both loops) and average_item (second
// loop) were function-scope variables NOT listed in the private clauses of
// the "#pragma omp parallel for" directives -- shared across threads and
// therefore a data race.  They are now declared inside the loops.  The
// reduction(+:average_sum,average_index) clause was likewise wrong in
// intent (they are per-iteration temporaries); they are now loop-local.
// NOTE(review): average_sum is an int accumulating float ratings, so each
// addition truncates; kept as-is to preserve the original results.
void get_item_average_matrix(float* rating,double* average_matrix, int start_index)
{
int m,n;
// end of this block, clamped to the matrix dimension (loop-invariant)
int end = (ITEM_COUNT < (start_index+ITEM_BLOCK) ? ITEM_COUNT : (start_index+ITEM_BLOCK));
#pragma omp parallel for private(m,n)
for ( m = start_index; m < end; m++ )
{
int average_sum = 0;     /* sum of nonzero ratings (int, truncating) */
int average_index = 0;   /* count of nonzero ratings */
int block_m = m - start_index;
for(n=0;n<USER_COUNT;n++)
{
if(rating[block_m*USER_COUNT+n] !=0)
{
average_sum += rating[block_m*USER_COUNT+n];
average_index += 1;
}
}
if(average_index!=0)
{
average_matrix[m]=(double)average_sum/(double)average_index;
}
else
{
average_matrix[m]=0;
}
}
// second pass: center every entry of the block by its item's mean
#pragma omp parallel for private(m,n)
for ( m = start_index; m < end; m++ )
{
int block_m = m - start_index;
double average_item = average_matrix[m];
for(n=0;n<USER_COUNT;n++)
{
rating[block_m*USER_COUNT+n] -= (float)average_item;
}
}
}
// Driver for item-based collaborative filtering:
//   Phase 1: load the training records (argv[1]), then for every pair of
//   item blocks mean-center the ratings and compute blockwise item-item
//   similarities (get_pearson_similarity).
//   Phase 2: stream user-major blocks of the training matrix and predict
//   the test ratings from the K_SORT most similar items, then report the
//   overall error.
// NOTE(review): argv[1] is used without checking argc.
int main(int argc, char ** argv){
//first, read the data in files into an array in order to process it more efficiently.
record_item_struct * item_data = (record_item_struct *) _mm_malloc(sizeof(record_item_struct)*RECORD_COUNT,64);
memset(item_data, 0, sizeof(record_item_struct)*RECORD_COUNT);
get_item_data(item_data, argv[1]);
// two item blocks: the "i" block and the "j" block of each pair
float * item_block_data_i = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
float * item_block_data_j = (float*)_mm_malloc(sizeof(float)*ITEM_BLOCK*USER_COUNT,64);
if (item_block_data_i==NULL || item_block_data_j == NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
double * item_average_matrix = (double*)_mm_malloc(sizeof(double)*ITEM_COUNT,64);
memset(item_average_matrix,0,sizeof(double)*ITEM_COUNT);
// top-K similar items per item; k_index == -1 marks an empty slot
k_similarity_struct * item_index_matrix = (k_similarity_struct *) _mm_malloc (sizeof(k_similarity_struct)*K_SORT*ITEM_COUNT,64);
// NOTE(review): this sets only the FIRST struct's bytes to 0xFF;
// presumably sizeof(k_similarity_struct)*K_SORT*ITEM_COUNT was intended,
// so most k_index slots are NOT initialized to -1 -- confirm.
memset (item_index_matrix,-1,sizeof(k_similarity_struct));
int item_start_index_i=0;
int item_start_index_j=0;
int i,j;
// phase 1: all (i,j) block pairs with j <= i (similarity is symmetric)
for (i=0; i <= ITEM_ROUND; i++)
{
printf("round %d ================== with ITEM_BLOCK %d\n",i,ITEM_BLOCK);
//block_data
printf("get item_block_data begins\n");
item_start_index_i = get_item_block_data(i, item_block_data_i,item_start_index_i, item_data);
printf("get_item_block_data ends\n");
//average matrix
printf("get_item_average_matrix begins\n");
get_item_average_matrix(item_block_data_i, item_average_matrix,i*ITEM_BLOCK);
printf("get_item_average_matrix ends\n");
item_start_index_j = 0;
for( j=0; j<= i;j++)
{
if( i==j)
{
// diagonal pair: both operands are the same block (flag=0 suppresses
// the symmetric update)
//the index of item after sorting the similarity matrix
printf("get k_similarity_begins\n");
get_pearson_similarity(item_block_data_i,item_block_data_i,item_average_matrix,item_index_matrix,i*ITEM_BLOCK,i*ITEM_BLOCK,0);
printf("get k_similarity_ends\n");
continue;
}
//block_data
printf("get item_block_data begins\n");
item_start_index_j = get_item_block_data(j, item_block_data_j, item_start_index_j, item_data);
printf("get_item_block_data ends\n");
//the index of item after sorting the similarity matrix
printf("get k_similarity_begins\n");
get_pearson_similarity(item_block_data_i,item_block_data_j,item_average_matrix,item_index_matrix,i*ITEM_BLOCK,j*ITEM_BLOCK,1);
printf("get k_similarity_ends\n");
}
}
_mm_free(item_block_data_i);
_mm_free(item_block_data_j);
// phase 2: prediction over the test set, one user block at a time
int *test_data;
float *test_rating;
test_data = (int*)_mm_malloc (sizeof(int)*2*TEST_COUNT,64);
test_rating= (float*)_mm_malloc(sizeof(float)*TEST_COUNT,64);
printf("get_test_data begins\n");
get_test_data(test_data,test_rating);
printf("get_test_data ends\n");
long user_file_start_index = 0;
float * user_block_data = (float*)_mm_malloc(sizeof(float)*USER_BLOCK*ITEM_COUNT,64);
if (user_block_data==NULL)
{
printf(" malloc of base_data failed\n");
exit(1);
}
int test_start_index = 0;
double * item_predict_rating =(double*) _mm_malloc (sizeof(double)*TEST_COUNT,64);
for(i=0;i<=USER_ROUND;i++)
{
user_file_start_index = get_user_block_data(i,user_block_data, user_file_start_index);
printf("get_predict_rating begins\n");
test_start_index=get_predict_rating(item_predict_rating, user_block_data, item_index_matrix, test_data,item_average_matrix,i*USER_BLOCK, test_start_index);
printf("get_predict_rating ends\n");
if ( test_start_index == TEST_COUNT)
break;
}
_mm_free (user_block_data);
double rmse;
printf("get_rmse begins\n");
rmse = get_rmse(test_rating,item_predict_rating);
printf("ge_rmse ends\n");
printf("rmse= %f\n", rmse);
return 0;
}
|
broadcast_reduce_customized-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce_customized-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
#define MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
#include "../../tensor/broadcast_reduce-inl.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
using mxnet_op::unravel;
using mxnet_op::ravel;
using mxnet_op::dot;
using mxnet_op::unravel_dot;
// Reduce the M broadcast positions of `big` that map onto output element
// `idx` and store the result into `small[idx]` (accumulating if `addto`).
// The caller-supplied Reducer instance provides init/step/finalize.
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx, const size_t M, const bool addto,
                                          const DType* __restrict big, OType *small,
                                          const Shape<ndim>& bshape, const Shape<ndim>& sshape,
                                          const Shape<ndim>& rshape, const Shape<ndim>& rstride,
                                          Reducer* reducer) {
  // Base offset of output element `idx` inside the big tensor.
  const Shape<ndim> out_coord = unravel(idx, sshape);
  const index_t base = ravel(out_coord, bshape);
  AType acc, extra;  // `extra` is reducer-specific auxiliary state
  reducer->SetInitValue(acc, extra);
  // Walk the reduced axes; each k is offset from `base` via the reduction strides.
  for (size_t k = 0; k < M; ++k) {
    const index_t offset = dot(unravel(k, rshape), rstride);
    reducer->Reduce(acc, AType(OP::Map(big[base + offset])), extra);
  }
  reducer->Finalize(acc, extra);
  assign(&small[idx], addto, OType(acc));
}
#ifdef __CUDACC__
#include "broadcast_reduce_customized-inl.cuh"
#include "../../tensor/broadcast_reduce-inl.cuh"
#else
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
void seq_reduce_compute_wr(const size_t N, const size_t M, const bool addto,
const DType *big, OType *small, const Shape<ndim> bshape,
const Shape<ndim> sshape, const Shape<ndim> rshape,
const Shape<ndim> rstride,
Reducer* reducer) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign_wr<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small,
bshape, sshape, rshape, rstride, reducer);
}
}
// Reduce `big` into `small` on CPU with a caller-supplied, possibly stateful
// Reducer instance (the "_wr" / WithReducer variant).
// req == kNullOp is a no-op; req == kAddTo accumulates into `small`.
// When safe_acc is set, accumulation is done in the (possibly wider) type
// selected by MXNET_ACC_TYPE_SWITCH and the output type may differ from DType.
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void ReduceWithReducer(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                       const Tensor<cpu, 1, char>& workspace, const TBlob& big,
                       Reducer* reducer) {
  if (req == kNullOp) return;
  // rshape/rstride describe the axes of `big` that are reduced away,
  // relative to the output shape of `small` (computed by diff()).
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  size_t N = small.shape_.Size(), M = rshape.Size();
  if (!safe_acc) {
    // Fast path: input, accumulator and output all share DType.
    seq_reduce_compute_wr<Reducer, ndim, DType, DType, DType, OP>(
      N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, reducer);
  } else {
    // Safe-accumulation path: expand over the accumulation type (AType)
    // and the output blob's runtime type (OType) via nested type switches.
    MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
        seq_reduce_compute_wr<Reducer, ndim, AccType, DataType, OutType, OP>(
          N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
          big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, reducer);
      });
    });
  }
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx, const size_t M, const bool addto,
const DType* __restrict big, const DType* __restrict lhs,
const DType* __restrict rhs, DType *small,
const Shape<ndim>& big_shape,
const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0,
const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
const Shape<ndim>& lhs_shape,
const Shape<ndim>& rhs_shape,
const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
const Shape<ndim>& rhs_stride,
Reducer* reducer) {
Shape<ndim> coord = unravel(idx, small_shape);
const index_t idx_big0 = ravel(coord, big_shape);
const index_t idx_lhs0 = ravel(coord, lhs_shape0);
const index_t idx_rhs0 = ravel(coord, rhs_shape0);
DType val, residual;
reducer->SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
Shape<ndim> coord_big = unravel(k, rshape);
index_t idx_big = idx_big0 + dot(coord_big, rstride);
Shape<ndim> coord_lhs = unravel(k, lhs_shape);
index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
Shape<ndim> coord_rhs = unravel(k, rhs_shape);
index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
reducer->Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
}
reducer->Finalize(val, residual);
assign(&small[idx], addto, val);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute_wr(const size_t N, const size_t M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0,
Reducer* reducer) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign_wr<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride, reducer);
}
}
// Reduce OP1(big, OP2(lhs, rhs)) into `small` on CPU, using a
// caller-supplied Reducer instance.  Each of big/lhs/rhs may broadcast
// differently against the output shape.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void ReduceWithReducer(Stream<cpu> *s, const TBlob& small, const OpReqType req,
                       const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
                       const TBlob& rhs, Reducer* reducer) {
  if (req == kNullOp) return;
  // Per-operand reduction geometry relative to the output shape.
  Shape<ndim> red_shape, red_stride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &red_shape, &red_stride);
  Shape<ndim> lshape, lstride;
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lshape, &lstride);
  Shape<ndim> rshape2, rstride2;
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rshape2, &rstride2);
  const size_t num_out = small.shape_.Size();
  const size_t num_red = red_shape.Size();
  seq_reduce_compute_wr<Reducer, ndim, DType, OP1, OP2>(
      num_out, num_red, req == kAddTo,
      big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(),
      red_shape, red_stride,
      lshape, lstride,
      rshape2, rstride2,
      lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
      reducer);
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
|
RefCount.h | #ifndef MiscLib__REFCOUNT_HEADER__
#define MiscLib__REFCOUNT_HEADER__
#ifdef DOPARALLEL
#include <omp.h>
#endif
namespace MiscLib
{
/// Intrusive reference-counter base class.
/** Objects start with a count of 1.  AddRef()/Release() return the new
 *  count; Release() destroys the object when the count reaches zero.
 *  The destructor is protected, so instances must be heap-allocated and
 *  released only via Release(). */
class RefCount
{
public:
	inline RefCount();
	inline RefCount(const RefCount &);
	inline unsigned int AddRef() const;
	inline unsigned int Release() const;
	inline RefCount &operator=(const RefCount &);

protected:
	virtual ~RefCount();

private:
	// mutable: AddRef/Release are logically const operations.
	mutable unsigned int m_refCount;
};

RefCount::RefCount()
: m_refCount(1)
{}

RefCount::RefCount(const RefCount &)
: m_refCount(1)
{
	// do not copy the ref count! A copy begins its own lifetime.
}

unsigned int RefCount::AddRef() const
{
	unsigned int count;
#ifdef DOPARALLEL
	// "atomic capture" (OpenMP 3.1+) increments and reads the new value in
	// one atomic step.  The original incremented atomically but re-read
	// m_refCount separately, so the returned value could reflect another
	// thread's concurrent update.
	#pragma omp atomic capture
	count = ++m_refCount;
#else
	count = ++m_refCount;
#endif
	return count;
}

unsigned int RefCount::Release() const
{
	// Decrement-and-test in a single atomic step.  The original tested
	// m_refCount == 1 *before* decrementing, which raced under DOPARALLEL:
	// two threads releasing from a count of 2 could both see "!= 1",
	// both decrement, and nobody would delete the object (a leak).
	unsigned int count;
#ifdef DOPARALLEL
	#pragma omp atomic capture
	count = --m_refCount;
#else
	count = --m_refCount;
#endif
	if(count == 0)
		delete this;
	return count;
}

RefCount &RefCount::operator=(const RefCount &)
{
	// do not copy the ref count!!!
	return *this;
}
};
#endif
|
radial_integrals.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file radial_integrals.h
*
* \brief Representation of various radial integrals.
*/
#ifndef __RADIAL_INTEGRALS_H__
#define __RADIAL_INTEGRALS_H__
#include "sbessel.h"
namespace sirius {
class Radial_integrals
{
private:
/// Basic parameters.
Simulation_parameters const& param_;
/// Unit cell.
Unit_cell const& unit_cell_;
/// Linear grid up to |G+k|_{max}
Radial_grid grid_gkmax_;
/// Linear grid up to |G|_{max}
Radial_grid grid_gmax_;
/// Linear grid up to |G|_{max} for radial integrals of local potential.
Radial_grid grid_gmax_vloc_;
mdarray<Spline<double>, 3> aug_radial_integrals_;
/// Beta-projector radial integrals.
mdarray<Spline<double>, 2> beta_radial_integrals_;
mdarray<Spline<double>, 2> beta_djldq_radial_integrals_;
mdarray<Spline<double>, 1> pseudo_core_radial_integrals_;
mdarray<Spline<double>, 1> pseudo_rho_radial_integrals_;
mdarray<Spline<double>, 1> vloc_radial_integrals_;
inline void generate_pseudo_rho_radial_integrals()
{
PROFILE("sirius::Radial_integrals::generate_pseudo_rho_radial_integrals");
pseudo_rho_radial_integrals_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
pseudo_rho_radial_integrals_(iat) = Spline<double>(grid_gmax_);
Spline<double> rho(atom_type.radial_grid());
for (int ir = 0; ir < atom_type.num_mt_points(); ir++) {
rho[ir] = atom_type.pp_desc().total_charge_density[ir];
}
rho.interpolate();
#pragma omp parallel for
for (int iq = 0; iq < grid_gmax_.num_points(); iq++) {
Spherical_Bessel_functions jl(0, atom_type.radial_grid(), grid_gmax_[iq]);
pseudo_rho_radial_integrals_(iat)[iq] = sirius::inner(jl[0], rho, 0, atom_type.num_mt_points()) / fourpi;
}
pseudo_rho_radial_integrals_(iat).interpolate();
}
}
/// Generate radial integrals for local part of pseudopotential.
/** See Potential::generate_local_potential() for more details. */
inline void generate_vloc_radial_integrals()
{
PROFILE("sirius::Radial_integrals::generate_vloc_radial_integrals");
vloc_radial_integrals_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
vloc_radial_integrals_(iat) = Spline<double>(grid_gmax_vloc_);
#pragma omp parallel for
for (int iq = 0; iq < grid_gmax_vloc_.num_points(); iq++) {
Spline<double> s(atom_type.radial_grid());
if (iq == 0) {
for (int ir = 0; ir < atom_type.num_mt_points(); ir++) {
double x = atom_type.radial_grid(ir);
s[ir] = (x * atom_type.pp_desc().vloc[ir] + atom_type.zn()) * x;
}
vloc_radial_integrals_(iat)[iq] = s.interpolate().integrate(0);
} else {
double g = grid_gmax_vloc_[iq];
double g2 = std::pow(g, 2);
for (int ir = 0; ir < atom_type.num_mt_points(); ir++) {
double x = atom_type.radial_grid(ir);
s[ir] = (x * atom_type.pp_desc().vloc[ir] + atom_type.zn() * gsl_sf_erf(x)) * std::sin(g * x);
}
vloc_radial_integrals_(iat)[iq] = (s.interpolate().integrate(0) / g - atom_type.zn() * std::exp(-g2 / 4) / g2);
}
}
vloc_radial_integrals_(iat).interpolate();
}
}
inline void generate_pseudo_core_radial_integrals()
{
PROFILE("sirius::Radial_integrals::generate_pseudo_core_radial_integrals");
pseudo_core_radial_integrals_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
pseudo_core_radial_integrals_(iat) = Spline<double>(grid_gmax_);
Spline<double> ps_core(atom_type.radial_grid());
for (int ir = 0; ir < atom_type.num_mt_points(); ir++) {
ps_core[ir] = atom_type.pp_desc().core_charge_density[ir];
}
ps_core.interpolate();
#pragma omp parallel for
for (int iq = 0; iq < grid_gmax_.num_points(); iq++) {
Spherical_Bessel_functions jl(0, atom_type.radial_grid(), grid_gmax_[iq]);
pseudo_core_radial_integrals_(iat)[iq] = sirius::inner(jl[0], ps_core, 2, atom_type.num_mt_points());
}
pseudo_core_radial_integrals_(iat).interpolate();
}
}
inline void generate_aug_radial_integrals()
{
PROFILE("sirius::Radial_integrals::generate_aug_radial_integrals");
int nmax = unit_cell_.max_mt_radial_basis_size();
int lmax = unit_cell_.lmax();
/* interpolate <j_{l_n}(q*x) | Q_{xi,xi'}^{l}(x) > with splines */
aug_radial_integrals_ = mdarray<Spline<double>, 3>(nmax * (nmax + 1) / 2, 2 * lmax + 1, unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
if (!atom_type.pp_desc().augment) {
continue;
}
/* number of radial beta-functions */
int nbrf = atom_type.mt_radial_basis_size();
/* maximum l of beta-projectors */
int lmax_beta = atom_type.indexr().lmax();
for (int l = 0; l <= 2 * lmax_beta; l++) {
for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) {
aug_radial_integrals_(idx, l, iat) = Spline<double>(grid_gmax_);
}
}
/* interpolate Q-operator radial functions */
mdarray<Spline<double>, 2> qrf_spline(nbrf * (nbrf + 1) / 2, 2 * lmax_beta + 1);
#pragma omp parallel for
for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) {
for (int l3 = 0; l3 <= 2 * lmax_beta; l3++) {
qrf_spline(idx, l3) = Spline<double>(atom_type.radial_grid());
for (int ir = 0; ir < atom_type.num_mt_points(); ir++) {
qrf_spline(idx, l3)[ir] = atom_type.pp_desc().q_radial_functions_l(ir, idx, l3);
}
qrf_spline(idx, l3).interpolate();
}
}
#pragma omp parallel for
for (int iq = 0; iq < grid_gmax_.num_points(); iq++) {
Spherical_Bessel_functions jl(2 * lmax_beta, atom_type.radial_grid(), grid_gmax_[iq]);
for (int l3 = 0; l3 <= 2 * lmax_beta; l3++) {
for (int idxrf2 = 0; idxrf2 < nbrf; idxrf2++) {
int l2 = atom_type.indexr(idxrf2).l;
for (int idxrf1 = 0; idxrf1 <= idxrf2; idxrf1++) {
int l1 = atom_type.indexr(idxrf1).l;
int idx = idxrf2 * (idxrf2 + 1) / 2 + idxrf1;
if (l3 >= std::abs(l1 - l2) && l3 <= (l1 + l2) && (l1 + l2 + l3) % 2 == 0) {
aug_radial_integrals_(idx, l3, iat)[iq] = sirius::inner(jl[l3], qrf_spline(idx, l3), 0,
atom_type.num_mt_points());
}
}
}
}
}
for (int l = 0; l <= 2 * lmax_beta; l++) {
for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) {
aug_radial_integrals_(idx, l, iat).interpolate();
}
}
}
}
inline void generate_beta_radial_integrals()
{
PROFILE("sirius::Radial_integrals::generate_beta_radial_integrals");
/* create space for <j_l(qr)|beta> radial integrals */
beta_radial_integrals_ = mdarray<Spline<double>, 2>(unit_cell_.max_mt_radial_basis_size(), unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
int nrb = atom_type.mt_radial_basis_size();
for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) {
beta_radial_integrals_(idxrf, iat) = Spline<double>(grid_gkmax_);
}
/* interpolate beta radial functions */
std::vector<Spline<double>> beta_rf(nrb);
for (int idxrf = 0; idxrf < nrb; idxrf++) {
beta_rf[idxrf] = Spline<double>(atom_type.radial_grid());
int nr = atom_type.pp_desc().num_beta_radial_points[idxrf];
for (int ir = 0; ir < nr; ir++) {
beta_rf[idxrf][ir] = atom_type.pp_desc().beta_radial_functions(ir, idxrf);
}
beta_rf[idxrf].interpolate();
}
#pragma omp parallel for
for (int iq = 0; iq < grid_gkmax_.num_points(); iq++) {
Spherical_Bessel_functions jl(unit_cell_.lmax(), atom_type.radial_grid(), grid_gkmax_[iq]);
for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) {
int l = atom_type.indexr(idxrf).l;
int nr = atom_type.pp_desc().num_beta_radial_points[idxrf];
/* compute \int j_l(q * r) beta_l(r) r^2 dr */
/* remeber that beta(r) are defined as miltiplied by r */
beta_radial_integrals_(idxrf, iat)[iq] = sirius::inner(jl[l], beta_rf[idxrf], 1, nr);
}
}
for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) {
beta_radial_integrals_(idxrf, iat).interpolate();
}
}
}
inline void generate_beta_djldq_radial_integrals()
{
PROFILE("sirius::Radial_integrals::generate_beta_djldq_radial_integrals");
/* create space for <j_l(qr)|beta> radial integrals */
beta_djldq_radial_integrals_ = mdarray<Spline<double>, 2>(unit_cell_.max_mt_radial_basis_size(), unit_cell_.num_atom_types());
for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
auto& atom_type = unit_cell_.atom_type(iat);
int nrb = atom_type.mt_radial_basis_size();
for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) {
beta_djldq_radial_integrals_(idxrf, iat) = Spline<double>(grid_gkmax_);
}
/* interpolate beta radial functions */
std::vector<Spline<double>> beta_rf(nrb);
for (int idxrf = 0; idxrf < nrb; idxrf++) {
beta_rf[idxrf] = Spline<double>(atom_type.radial_grid());
int nr = atom_type.pp_desc().num_beta_radial_points[idxrf];
for (int ir = 0; ir < nr; ir++) {
beta_rf[idxrf][ir] = atom_type.pp_desc().beta_radial_functions(ir, idxrf);
}
beta_rf[idxrf].interpolate();
}
#pragma omp parallel for
for (int iq = 0; iq < grid_gkmax_.num_points(); iq++) {
Spherical_Bessel_functions jl(unit_cell_.lmax(), atom_type.radial_grid(), grid_gkmax_[iq]);
for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) {
int l = atom_type.indexr(idxrf).l;
int nr = atom_type.pp_desc().num_beta_radial_points[idxrf];
/* compute \int d (j_l(q*r) / dq) beta_l(r) r^2 dr */
/* remeber that beta(r) are defined as miltiplied by r */
auto s = jl.deriv_q(l);
beta_djldq_radial_integrals_(idxrf, iat)[iq] = sirius::inner(s, beta_rf[idxrf], 1, nr);
}
}
for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) {
beta_djldq_radial_integrals_(idxrf, iat).interpolate();
}
}
}
inline std::pair<int, double> iqdq_gkmax(double q__) const
{
std::pair<int, double> result;
result.first = static_cast<int>((grid_gkmax_.num_points() - 1) * q__ / param_.gk_cutoff());
/* delta q = q - q_i */
result.second = q__ - grid_gkmax_[result.first];
return std::move(result);
}
inline std::pair<int, double> iqdq_gmax(double q__) const
{
std::pair<int, double> result;
result.first = static_cast<int>((grid_gmax_.num_points() - 1) * q__ / param_.pw_cutoff());
/* delta q = q - q_i */
result.second = q__ - grid_gmax_[result.first];
return std::move(result);
}
inline std::pair<int, double> iqdq_gmax_vloc(double q__) const
{
std::pair<int, double> result;
result.first = static_cast<int>((grid_gmax_vloc_.num_points() - 1) * q__ / param_.pw_cutoff());
/* delta q = q - q_i */
result.second = q__ - grid_gmax_vloc_[result.first];
return std::move(result);
}
public:
/// Constructor.
Radial_integrals(Simulation_parameters const& param__,
Unit_cell const& unit_cell__)
: param_(param__)
, unit_cell_(unit_cell__)
{
grid_gmax_ = Radial_grid(linear_grid, static_cast<int>(20 * param_.pw_cutoff()), 0, param_.pw_cutoff());
grid_gkmax_ = Radial_grid(linear_grid, static_cast<int>(20 * param_.gk_cutoff()), 0, param_.gk_cutoff());
grid_gmax_vloc_ = Radial_grid(linear_grid, static_cast<int>(100 * param_.pw_cutoff()), 0, param_.pw_cutoff());
if (param_.esm_type() == electronic_structure_method_t::pseudopotential) {
generate_beta_radial_integrals();
generate_beta_djldq_radial_integrals();
generate_aug_radial_integrals();
generate_pseudo_core_radial_integrals();
generate_pseudo_rho_radial_integrals();
generate_vloc_radial_integrals();
}
}
inline double beta_radial_integral(int idxrf__, int iat__, double q__) const
{
auto iqdq = iqdq_gkmax(q__);
return beta_radial_integrals_(idxrf__, iat__)(iqdq.first, iqdq.second);
}
inline double beta_djldq_radial_integral(int idxrf__, int iat__, double q__) const
{
auto iqdq = iqdq_gkmax(q__);
return beta_djldq_radial_integrals_(idxrf__, iat__)(iqdq.first, iqdq.second);
}
inline double aug_radial_integral(int idx__, int l__, int iat__, double q__) const
{
auto iqdq = iqdq_gmax(q__);
return aug_radial_integrals_(idx__, l__, iat__)(iqdq.first, iqdq.second);
}
inline double pseudo_core_radial_integral(int iat__, double q__) const
{
auto iqdq = iqdq_gmax(q__);
return pseudo_core_radial_integrals_(iat__)(iqdq.first, iqdq.second);
}
inline double pseudo_rho_radial_integral(int iat__, double q__) const
{
auto iqdq = iqdq_gmax(q__);
return pseudo_rho_radial_integrals_(iat__)(iqdq.first, iqdq.second);
}
inline double vloc_radial_integral(int iat__, double q__) const
{
auto iqdq = iqdq_gmax_vloc(q__);
return vloc_radial_integrals_(iat__)(iqdq.first, iqdq.second);
}
};
}
#endif // __RADIAL_INTEGRALS_H__
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/property.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/gem.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RGBTransformImage() converts the reference image from RGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalized the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the RGBTransformImage method is:
%
% MagickBooleanType RGBTransformImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
/*
  Convert quantum-scaled RGB to XYZ tristimulus values: undo the sRGB
  transfer curve on each channel, then apply the linear RGB->XYZ matrix
  (coefficients match the sRGB primaries).
*/
static inline void ConvertRGBToXYZ(const Quantum red,const Quantum green,
  const Quantum blue,double *X,double *Y,double *Z)
{
  double
    b,
    g,
    r;

  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  /* Linearize each channel: gamma segment above 0.04045, linear below. */
  r=QuantumScale*red;
  r=(r > 0.04045) ? pow((r+0.055)/1.055,2.4) : r/12.92;
  g=QuantumScale*green;
  g=(g > 0.04045) ? pow((g+0.055)/1.055,2.4) : g/12.92;
  b=QuantumScale*blue;
  b=(b > 0.04045) ? pow((b+0.055)/1.055,2.4) : b/12.92;
  /* Linear RGB -> XYZ. */
  *X=0.4124240*r+0.3575790*g+0.1804640*b;
  *Y=0.2126560*r+0.7151580*g+0.0721856*b;
  *Z=0.0193324*r+0.1191930*g+0.9504440*b;
}
static double LabF1(double alpha)
{
if (alpha <= ((24.0/116.0)*(24.0/116.0)*(24.0/116.0)))
return((841.0/108.0)*alpha+(16.0/116.0));
return(pow(alpha,1.0/3.0));
}
/*
  Convert XYZ tristimulus values to Lab, with all three outputs rescaled
  into [0..1]: L is divided by 100, a and b are divided by 255 and wrapped
  so that negative values land in the upper half of the range.
*/
static inline void ConvertXYZToLab(const double X,const double Y,const double Z,
  double *L,double *a,double *b)
{
#define D50X  (0.9642)
#define D50Y  (1.0)
#define D50Z  (0.8249)

  double
    fx,
    fy,
    fz;

  assert(L != (double *) NULL);
  assert(a != (double *) NULL);
  assert(b != (double *) NULL);
  /* Defaults for pure black: L=0 and mid-range a/b. */
  *L=0.0;
  *a=0.5;
  *b=0.5;
  if ((X == 0.0) && (Y == 0.0) && (Z == 0.0))
    return;
  /* Normalize against the D50 white point, then apply f(). */
  fx=LabF1(X/D50X);
  fy=LabF1(Y/D50Y);
  fz=LabF1(Z/D50Z);
  *L=(116.0*fy-16.0)/100.0;
  *a=(500.0*(fx-fy))/255.0;
  *b=(200.0*(fy-fz))/255.0;
  /* Wrap negative a/b into [0..1). */
  if (*a < 0.0)
    *a+=1.0;
  if (*b < 0.0)
    *b+=1.0;
}
MagickExport MagickBooleanType RGBTransformImage(Image *image,
const ColorspaceType colorspace)
{
#define RGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status,
sync;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
register ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != RGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
switch (image->colorspace)
{
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
case RGBColorspace:
case TransparentColorspace:
break;
default:
{
(void) TransformImageColorspace(image,image->colorspace);
break;
}
}
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYColorspace:
{
/*
Convert RGB to CMY colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum((MagickRealType)
(QuantumRange-GetPixelRed(q))));
SetPixelGreen(q,ClampToQuantum((MagickRealType)
(QuantumRange-GetPixelGreen(q))));
SetPixelBlue(q,ClampToQuantum((MagickRealType)
(QuantumRange-GetPixelBlue(q))));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
return(status);
}
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
GetMagickPixelPacket(image,&zero);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
ConvertRGBToCMYK(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->matte == MagickFalse ? ColorSeparationType :
ColorSeparationMatteType;
return(status);
}
case HSBColorspace:
{
/*
Transform image from RGB to HSB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
brightness,
hue,
saturation;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
saturation=0.0;
brightness=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToHSB(GetPixelRed(q),GetPixelGreen(q),
GetPixelBlue(q),&hue,&saturation,&brightness);
SetPixelRed(q,ClampToQuantum((MagickRealType)
QuantumRange*hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType)
QuantumRange*saturation));
SetPixelBlue(q,ClampToQuantum((MagickRealType)
QuantumRange*brightness));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case HSLColorspace:
{
/*
Transform image from RGB to HSL.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
hue,
lightness,
saturation;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
saturation=0.0;
lightness=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToHSL(GetPixelRed(q),GetPixelGreen(q),
GetPixelBlue(q),&hue,&saturation,&lightness);
SetPixelRed(q,ClampToQuantum((MagickRealType)
QuantumRange*hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType)
QuantumRange*saturation));
SetPixelBlue(q,ClampToQuantum((MagickRealType)
QuantumRange*lightness));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case HWBColorspace:
{
/*
Transform image from RGB to HWB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blackness,
hue,
whiteness;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
hue=0.0;
whiteness=0.0;
blackness=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToHWB(GetPixelRed(q),GetPixelGreen(q),
GetPixelBlue(q),&hue,&whiteness,&blackness);
SetPixelRed(q,ClampToQuantum((MagickRealType)
QuantumRange*hue));
SetPixelGreen(q,ClampToQuantum((MagickRealType)
QuantumRange*whiteness));
SetPixelBlue(q,ClampToQuantum((MagickRealType)
QuantumRange*blackness));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case LabColorspace:
{
/*
Transform image from RGB to Lab.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
a,
b,
L,
X,
Y,
Z;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
L=0.0;
a=0.0;
b=0.0;
X=0.0;
Y=0.0;
Z=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToXYZ(GetPixelRed(q),GetPixelGreen(q),
GetPixelBlue(q),&X,&Y,&Z);
ConvertXYZToLab(X,Y,Z,&L,&a,&b);
SetPixelRed(q,ClampToQuantum((MagickRealType)
QuantumRange*L));
SetPixelGreen(q,ClampToQuantum((MagickRealType)
QuantumRange*a));
SetPixelBlue(q,ClampToQuantum((MagickRealType)
QuantumRange*b));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=1.0/InterpretLocaleValue(value,(char **) NULL) != 0.0 ?
InterpretLocaleValue(value,(char **) NULL) : 1.0;
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=InterpretLocaleValue(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=InterpretLocaleValue(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=InterpretLocaleValue(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*
0.002/film_gamma);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((MagickRealType) (MaxMap*(reference_white+
log10(black+((MagickRealType) i/MaxMap)*(1.0-black))/((gamma/density)*
0.002/film_gamma))/1024.0));
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
SetPixelRed(q,logmap[ScaleQuantumToMap(
GetPixelRed(q))]);
SetPixelGreen(q,logmap[ScaleQuantumToMap(
GetPixelGreen(q))]);
SetPixelBlue(q,logmap[ScaleQuantumToMap(
GetPixelBlue(q))]);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) ResetMagickMemory(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.33333f*(MagickRealType) i;
y_map[i].x=0.33334f*(MagickRealType) i;
z_map[i].x=0.33333f*(MagickRealType) i;
x_map[i].y=0.50000f*(MagickRealType) i;
y_map[i].y=0.00000f*(MagickRealType) i;
z_map[i].y=(-0.50000f)*(MagickRealType) i;
x_map[i].z=(-0.25000f)*(MagickRealType) i;
y_map[i].z=0.50000f*(MagickRealType) i;
z_map[i].z=(-0.25000f)*(MagickRealType) i;
}
break;
}
case Rec601LumaColorspace:
case GRAYColorspace:
{
/*
Initialize Rec601 luma tables:
G = 0.29900*R+0.58700*G+0.11400*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.29900f*(MagickRealType) i;
y_map[i].x=0.58700f*(MagickRealType) i;
z_map[i].x=0.11400f*(MagickRealType) i;
x_map[i].y=0.29900f*(MagickRealType) i;
y_map[i].y=0.58700f*(MagickRealType) i;
z_map[i].y=0.11400f*(MagickRealType) i;
x_map[i].z=0.29900f*(MagickRealType) i;
y_map[i].z=0.58700f*(MagickRealType) i;
z_map[i].z=0.11400f*(MagickRealType) i;
}
image->type=GrayscaleType;
break;
}
case Rec601YCbCrColorspace:
case YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.299000*R+0.587000*G+0.114000*B
Cb= -0.168736*R-0.331264*G+0.500000*B
Cr= 0.500000*R-0.418688*G-0.081312*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.299000f*(MagickRealType) i;
y_map[i].x=0.587000f*(MagickRealType) i;
z_map[i].x=0.114000f*(MagickRealType) i;
x_map[i].y=(-0.168730f)*(MagickRealType) i;
y_map[i].y=(-0.331264f)*(MagickRealType) i;
z_map[i].y=0.500000f*(MagickRealType) i;
x_map[i].z=0.500000f*(MagickRealType) i;
y_map[i].z=(-0.418688f)*(MagickRealType) i;
z_map[i].z=(-0.081312f)*(MagickRealType) i;
}
break;
}
case Rec709LumaColorspace:
{
/*
Initialize Rec709 luma tables:
G = 0.21260*R+0.71520*G+0.07220*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.21260f*(MagickRealType) i;
y_map[i].x=0.71520f*(MagickRealType) i;
z_map[i].x=0.07220f*(MagickRealType) i;
x_map[i].y=0.21260f*(MagickRealType) i;
y_map[i].y=0.71520f*(MagickRealType) i;
z_map[i].y=0.07220f*(MagickRealType) i;
x_map[i].z=0.21260f*(MagickRealType) i;
y_map[i].z=0.71520f*(MagickRealType) i;
z_map[i].z=0.07220f*(MagickRealType) i;
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212600*R+0.715200*G+0.072200*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.212600f*(MagickRealType) i;
y_map[i].x=0.715200f*(MagickRealType) i;
z_map[i].x=0.072200f*(MagickRealType) i;
x_map[i].y=(-0.114572f)*(MagickRealType) i;
y_map[i].y=(-0.385428f)*(MagickRealType) i;
z_map[i].y=0.500000f*(MagickRealType) i;
x_map[i].z=0.500000f*(MagickRealType) i;
y_map[i].z=(-0.454153f)*(MagickRealType) i;
z_map[i].z=(-0.045847f)*(MagickRealType) i;
}
break;
}
case sRGBColorspace:
{
/*
Linear sRGB to nonlinear RGB (http://www.w3.org/Graphics/Color/sRGB):
R = 1.0*R+0.0*G+0.0*B
G = 0.0*R+1.0*G+0.0*B
B = 0.0*R+0.0*G+1.0*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
MagickRealType
v;
v=(MagickRealType) i/(MagickRealType) MaxMap;
if (((MagickRealType) i/(MagickRealType) MaxMap) <= 0.04045f)
v/=12.92f;
else
v=(MagickRealType) pow((((double) i/MaxMap)+0.055)/1.055,2.4);
x_map[i].x=1.0f*MaxMap*v;
y_map[i].x=0.0f*MaxMap*v;
z_map[i].x=0.0f*MaxMap*v;
x_map[i].y=0.0f*MaxMap*v;
y_map[i].y=1.0f*MaxMap*v;
z_map[i].y=0.0f*MaxMap*v;
x_map[i].z=0.0f*MaxMap*v;
y_map[i].z=0.0f*MaxMap*v;
z_map[i].z=1.0f*MaxMap*v;
}
break;
}
case XYZColorspace:
{
/*
Initialize CIE XYZ tables (ITU-R 709 RGB):
X = 0.4124564*R+0.3575761*G+0.1804375*B
Y = 0.2126729*R+0.7151522*G+0.0721750*B
Z = 0.0193339*R+0.1191920*G+0.9503041*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.4124564f*(MagickRealType) i;
y_map[i].x=0.3575761f*(MagickRealType) i;
z_map[i].x=0.1804375f*(MagickRealType) i;
x_map[i].y=0.2126729f*(MagickRealType) i;
y_map[i].y=0.7151522f*(MagickRealType) i;
z_map[i].y=0.0721750f*(MagickRealType) i;
x_map[i].z=0.0193339f*(MagickRealType) i;
y_map[i].z=0.1191920f*(MagickRealType) i;
z_map[i].z=0.9503041f*(MagickRealType) i;
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.29900*R+0.58700*G+0.11400*B
C1= -0.29900*R-0.58700*G+0.88600*B
C2= 0.70100*R-0.58700*G-0.11400*B
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.003962014134275617f*(MagickRealType) i;
y_map[i].x=0.007778268551236748f*(MagickRealType) i;
z_map[i].x=0.001510600706713781f*(MagickRealType) i;
x_map[i].y=(-0.002426619775463276f)*(MagickRealType) i;
y_map[i].y=(-0.004763965913702149f)*(MagickRealType) i;
z_map[i].y=0.007190585689165425f*(MagickRealType) i;
x_map[i].z=0.006927257754597858f*(MagickRealType) i;
y_map[i].z=(-0.005800713697502058f)*(MagickRealType) i;
z_map[i].z=(-0.0011265440570958f)*(MagickRealType) i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.2201118963486454*(1.099f*(MagickRealType) i-0.099f);
y_map[i].x=0.4321260306242638*(1.099f*(MagickRealType) i-0.099f);
z_map[i].x=0.08392226148409894*(1.099f*(MagickRealType) i-0.099f);
x_map[i].y=(-0.1348122097479598)*(1.099f*(MagickRealType) i-0.099f);
y_map[i].y=(-0.2646647729834528)*(1.099f*(MagickRealType) i-0.099f);
z_map[i].y=0.3994769827314126*(1.099f*(MagickRealType) i-0.099f);
x_map[i].z=0.3848476530332144*(1.099f*(MagickRealType) i-0.099f);
y_map[i].z=(-0.3222618720834477)*(1.099f*(MagickRealType) i-0.099f);
z_map[i].z=(-0.06258578094976668)*(1.099f*(MagickRealType) i-0.099f);
}
break;
}
case YIQColorspace:
{
/*
Initialize YIQ tables:
Y = 0.29900*R+0.58700*G+0.11400*B
I = 0.59600*R-0.27400*G-0.32200*B
Q = 0.21100*R-0.52300*G+0.31200*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.29900f*(MagickRealType) i;
y_map[i].x=0.58700f*(MagickRealType) i;
z_map[i].x=0.11400f*(MagickRealType) i;
x_map[i].y=0.59600f*(MagickRealType) i;
y_map[i].y=(-0.27400f)*(MagickRealType) i;
z_map[i].y=(-0.32200f)*(MagickRealType) i;
x_map[i].z=0.21100f*(MagickRealType) i;
y_map[i].z=(-0.52300f)*(MagickRealType) i;
z_map[i].z=0.31200f*(MagickRealType) i;
}
break;
}
case YPbPrColorspace:
{
/*
Initialize YPbPr tables (ITU-R BT.601):
Y = 0.299000*R+0.587000*G+0.114000*B
Pb= -0.168736*R-0.331264*G+0.500000*B
Pr= 0.500000*R-0.418688*G-0.081312*B
Pb and Pr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.299000f*(MagickRealType) i;
y_map[i].x=0.587000f*(MagickRealType) i;
z_map[i].x=0.114000f*(MagickRealType) i;
x_map[i].y=(-0.168736f)*(MagickRealType) i;
y_map[i].y=(-0.331264f)*(MagickRealType) i;
z_map[i].y=0.500000f*(MagickRealType) i;
x_map[i].z=0.500000f*(MagickRealType) i;
y_map[i].z=(-0.418688f)*(MagickRealType) i;
z_map[i].z=(-0.081312f)*(MagickRealType) i;
}
break;
}
case YUVColorspace:
default:
{
/*
Initialize YUV tables:
Y = 0.29900*R+0.58700*G+0.11400*B
U = -0.14740*R-0.28950*G+0.43690*B
V = 0.61500*R-0.51500*G-0.10000*B
U and V, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange. Note that U = 0.493*(B-Y), V = 0.877*(R-Y).
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.29900f*(MagickRealType) i;
y_map[i].x=0.58700f*(MagickRealType) i;
z_map[i].x=0.11400f*(MagickRealType) i;
x_map[i].y=(-0.14740f)*(MagickRealType) i;
y_map[i].y=(-0.28950f)*(MagickRealType) i;
z_map[i].y=0.43690f*(MagickRealType) i;
x_map[i].z=0.61500f*(MagickRealType) i;
y_map[i].z=(-0.51500f)*(MagickRealType) i;
z_map[i].z=(-0.10000f)*(MagickRealType) i;
}
break;
}
}
/*
Convert from RGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*restrict q;
register size_t
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(GetPixelRed(q));
green=ScaleQuantumToMap(GetPixelGreen(q));
blue=ScaleQuantumToMap(GetPixelBlue(q));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
(MagickRealType) primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
(MagickRealType) primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
(MagickRealType) primary_info.z;
SetPixelRed(q,ScaleMapToQuantum(pixel.red));
SetPixelGreen(q,ScaleMapToQuantum(pixel.green));
SetPixelBlue(q,ScaleMapToQuantum(pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_RGBTransformImage)
#endif
proceed=SetImageProgress(image,RGBTransformImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
register size_t
blue,
green,
red;
/*
Convert PseudoClass image.
*/
image_view=AcquireCacheView(image);
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
red=ScaleQuantumToMap(image->colormap[i].red);
green=ScaleQuantumToMap(image->colormap[i].green);
blue=ScaleQuantumToMap(image->colormap[i].blue);
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=ScaleMapToQuantum(pixel.red);
image->colormap[i].green=ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=ScaleMapToQuantum(pixel.blue);
}
image_view=DestroyCacheView(image_view);
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  /*
    Set the colorspace member of the Image structure.  Only the tag is
    updated; the pixel data is not transformed (use
    TransformImageColorspace() to convert pixels as well).

    Returns MagickTrue always.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image->colorspace=colorspace;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    UndefinedColorspace merely retags the image; no pixel transform occurs.
  */
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace));
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    A request for RGB (or transparent RGB) is a straight decode from the
    image's current colorspace.
  */
  if ((colorspace == RGBColorspace) || (colorspace == TransparentColorspace))
    return(TransformRGBImage(image,image->colorspace));
  /*
    Otherwise convert through RGB: decode to RGB first unless the image is
    already in an RGB-compatible colorspace, then encode to the target.
    The encode step runs even when the decode fails (best-effort), and the
    failure is still reflected in the return status.
  */
  status=MagickTrue;
  switch (image->colorspace)
  {
    case RGBColorspace:
    case TransparentColorspace:
    case GRAYColorspace:
      break;
    default:
    {
      status=TransformRGBImage(image,image->colorspace);
      break;
    }
  }
  if (RGBTransformImage(image,colorspace) == MagickFalse)
    status=MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformRGBImage() converts the reference image from an alternate
% colorspace to RGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the TransformRGBImage method is:
%
% MagickBooleanType TransformRGBImage(Image *image,
% const ColorspaceType colorspace)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
*/
static double LabF2(double alpha)
{
  double
    linear;

  /*
    Inverse of the CIE Lab companding function: cubic above the
    (24/116) threshold, linear (and clamped at zero) below it.
  */
  if (alpha > (24.0/116.0))
    return(alpha*alpha*alpha);
  linear=(108.0/841.0)*(alpha-(16.0/116.0));
  return(linear < 0.0 ? 0.0 : linear);
}
static inline void ConvertLabToXYZ(const double L,const double a,const double b,
  double *X,double *Y,double *Z)
{
  double
    fx,
    fy,
    fz;

  /*
    Convert CIE Lab to XYZ under the D50 white point.  L, a, and b arrive
    normalized to [0,1]; a and b values above 0.5 encode negative chroma
    (wrap-around encoding), hence the a-1.0 / b-1.0 adjustments below.
  */
  assert(X != (double *) NULL);
  assert(Y != (double *) NULL);
  assert(Z != (double *) NULL);
  *X=0.0;
  *Y=0.0;
  *Z=0.0;
  if (L <= 0.0)
    return;
  fy=(100.0*L+16.0)/116.0;
  fx=fy+255.0*0.002*(a > 0.5 ? a-1.0 : a);
  fz=fy-255.0*0.005*(b > 0.5 ? b-1.0 : b);
  *X=D50X*LabF2(fx);
  *Y=D50Y*LabF2(fy);
  *Z=D50Z*LabF2(fz);
}
static inline ssize_t RoundToYCC(const MagickRealType value)
{
  /*
    Round to the nearest integer index into the YCC lookup table, clamped
    to [0,1388].  Clamping happens before the cast so out-of-range values
    never hit an undefined float-to-integer conversion.
  */
  if (value >= 1388.0)
    return(1388);
  if (value <= 0.0)
    return(0);
  return((ssize_t) (value+0.5));
}
static inline void ConvertXYZToRGB(const double x,const double y,const double z,
  Quantum *red,Quantum *green,Quantum *blue)
{
  double
    channel[3];

  register ssize_t
    i;

  /*
    Convert CIE XYZ to nonlinear RGB: apply the linear RGB matrix, then
    the standard sRGB gamma companding (linear segment below 0.0031308,
    1.055*c^(1/2.4)-0.055 above) to each channel independently.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  channel[0]=3.2404542*x-1.5371385*y-0.4985314*z;
  channel[1]=(-0.9692660*x+1.8760108*y+0.0415560*z);
  channel[2]=0.0556434*x-0.2040259*y+1.0572252*z;
  for (i=0; i < 3; i++)
  {
    if (channel[i] > 0.0031308)
      channel[i]=1.055*pow(channel[i],1.0/2.4)-0.055;
    else
      channel[i]*=12.92;
  }
  *red=RoundToQuantum((MagickRealType) QuantumRange*channel[0]);
  *green=RoundToQuantum((MagickRealType) QuantumRange*channel[1]);
  *blue=RoundToQuantum((MagickRealType) QuantumRange*channel[2]);
}
static inline void ConvertCMYKToRGB(MagickPixelPacket *pixel)
{
  MagickRealType
    black;

  /*
    Undo the CMYK separation in place: each color channel stores its value
    scaled by the non-black range, so add the black (K) component, kept in
    pixel->index, back in and invert.
  */
  black=pixel->index;
  pixel->red=(MagickRealType) QuantumRange-(QuantumScale*pixel->red*
    (QuantumRange-black)+black);
  pixel->green=(MagickRealType) QuantumRange-(QuantumScale*pixel->green*
    (QuantumRange-black)+black);
  pixel->blue=(MagickRealType) QuantumRange-(QuantumScale*pixel->blue*
    (QuantumRange-black)+black);
}
MagickExport MagickBooleanType TransformRGBImage(Image *image,
const ColorspaceType colorspace)
{
#define D50X (0.9642)
#define D50Y (1.0)
#define D50Z (0.8249)
#define TransformRGBImageTag "Transform/Image"
#if !defined(MAGICKCORE_HDRI_SUPPORT)
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
#endif
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
switch (colorspace)
{
case GRAYColorspace:
case Rec601LumaColorspace:
case Rec709LumaColorspace:
case RGBColorspace:
case TransparentColorspace:
case UndefinedColorspace:
return(MagickTrue);
default:
break;
}
status=MagickTrue;
progress=0;
exception=(&image->exception);
switch (colorspace)
{
case CMYColorspace:
{
/*
Transform image from CMY to RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum((MagickRealType)
(QuantumRange-GetPixelRed(q))));
SetPixelGreen(q,ClampToQuantum((MagickRealType)
(QuantumRange-GetPixelGreen(q))));
SetPixelBlue(q,ClampToQuantum((MagickRealType)
(QuantumRange-GetPixelBlue(q))));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case CMYKColorspace:
{
MagickPixelPacket
zero;
/*
Transform image from CMYK to RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
GetMagickPixelPacket(image,&zero);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HSBColorspace:
{
/*
Transform image from HSB to RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
brightness,
hue,
saturation;
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
saturation=(double) (QuantumScale*GetPixelGreen(q));
brightness=(double) (QuantumScale*GetPixelBlue(q));
ConvertHSBToRGB(hue,saturation,brightness,&red,&green,&blue);
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HSLColorspace:
{
/*
Transform image from HSL to RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
hue,
lightness,
saturation;
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
saturation=(double) (QuantumScale*GetPixelGreen(q));
lightness=(double) (QuantumScale*GetPixelBlue(q));
ConvertHSLToRGB(hue,saturation,lightness,&red,&green,&blue);
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case HWBColorspace:
{
/*
Transform image from HWB to RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
blackness,
hue,
whiteness;
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
hue=(double) (QuantumScale*GetPixelRed(q));
whiteness=(double) (QuantumScale*GetPixelGreen(q));
blackness=(double) (QuantumScale*GetPixelBlue(q));
ConvertHWBToRGB(hue,whiteness,blackness,&red,&green,&blue);
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LabColorspace:
{
/*
Transform image from Lab to RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
a,
b,
L,
X,
Y,
Z;
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
X=0.0;
Y=0.0;
Z=0.0;
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
blue,
green,
red;
L=QuantumScale*GetPixelRed(q);
a=QuantumScale*GetPixelGreen(q);
b=QuantumScale*GetPixelBlue(q);
ConvertLabToXYZ(L,a,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
SetPixelRed(q,red);
SetPixelGreen(q,green);
SetPixelBlue(q,blue);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to RGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma");
if (value != (const char *) NULL)
gamma=1.0/InterpretLocaleValue(value,(char **) NULL) != 0.0 ?
InterpretLocaleValue(value,(char **) NULL) : 1.0;
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma");
if (value != (const char *) NULL)
film_gamma=InterpretLocaleValue(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black");
if (value != (const char *) NULL)
reference_black=InterpretLocaleValue(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white");
if (value != (const char *) NULL)
reference_white=InterpretLocaleValue(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*
0.002/film_gamma);
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum((MagickRealType) QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*
(gamma/density)*0.002/film_gamma)-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=(Quantum) QuantumRange;
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
SetPixelRed(q,logmap[ScaleQuantumToMap(
GetPixelRed(q))]);
SetPixelGreen(q,logmap[ScaleQuantumToMap(
GetPixelGreen(q))]);
SetPixelBlue(q,logmap[ScaleQuantumToMap(
GetPixelBlue(q))]);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) i;
y_map[i].x=0.500000f*(2.000000*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].x=(-0.333340f)*(2.000000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].y=(MagickRealType) i;
y_map[i].y=0.000000f;
z_map[i].y=0.666665f*(2.000000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].z=(MagickRealType) i;
y_map[i].z=(-0.500000f)*(2.000000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].z=(-0.333340f)*(2.000000f*(MagickRealType) i-(MagickRealType)
MaxMap);
}
break;
}
case Rec601YCbCrColorspace:
case YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) i;
y_map[i].x=0.000000f;
z_map[i].x=(1.402000f*0.500000f)*(2.000000f*(MagickRealType) i-
(MagickRealType) MaxMap);
x_map[i].y=(MagickRealType) i;
y_map[i].y=(-0.344136f*0.500000f)*(2.000000f*(MagickRealType) i-
(MagickRealType) MaxMap);
z_map[i].y=(-0.714136f*0.500000f)*(2.000000f*(MagickRealType) i-
(MagickRealType) MaxMap);
x_map[i].z=(MagickRealType) i;
y_map[i].z=(1.772000f*0.500000f)*(2.000000f*(MagickRealType) i-
(MagickRealType) MaxMap);
z_map[i].z=0.000000f;
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) i;
y_map[i].x=0.000000f;
z_map[i].x=(1.574800f*0.50000f)*(2.00000f*(MagickRealType) i-
(MagickRealType) MaxMap);
x_map[i].y=(MagickRealType) i;
y_map[i].y=(-0.187324f*0.50000f)*(2.00000f*(MagickRealType) i-
(MagickRealType) MaxMap);
z_map[i].y=(-0.468124f*0.50000f)*(2.00000f*(MagickRealType) i-
(MagickRealType) MaxMap);
x_map[i].z=(MagickRealType) i;
y_map[i].z=(1.855600f*0.50000f)*(2.00000f*(MagickRealType) i-
(MagickRealType) MaxMap);
z_map[i].z=0.00000f;
}
break;
}
case sRGBColorspace:
{
/*
Nonlinear sRGB to linear RGB.
R = 1.0*R+0.0*G+0.0*B
G = 0.0*R+1.0*G+0.0*B
B = 0.0*R+0.0*G+1.0*B
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=1.0f*(MagickRealType) i;
y_map[i].x=0.0f*(MagickRealType) i;
z_map[i].x=0.0f*(MagickRealType) i;
x_map[i].y=0.0f*(MagickRealType) i;
y_map[i].y=1.0f*(MagickRealType) i;
z_map[i].y=0.0f*(MagickRealType) i;
x_map[i].z=0.0f*(MagickRealType) i;
y_map[i].z=0.0f*(MagickRealType) i;
z_map[i].z=1.0f*(MagickRealType) i;
}
break;
}
case XYZColorspace:
{
/*
Initialize CIE XYZ tables (ITU R-709 RGB):
R = 3.2404542*X-1.5371385*Y-0.4985314*Z
G = -0.9692660*X+1.8760108*Y+0.0415560*Z
B = 0.0556434*X-0.2040259*Y+1.057225*Z
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=3.2404542f*(MagickRealType) i;
x_map[i].y=(-0.9692660f)*(MagickRealType) i;
x_map[i].z=0.0556434f*(MagickRealType) i;
y_map[i].x=(-1.5371385f)*(MagickRealType) i;
y_map[i].y=1.8760108f*(MagickRealType) i;
y_map[i].z=(-0.2040259f)*(MagickRealType) i;
z_map[i].x=(-0.4985314f)*(MagickRealType) i;
z_map[i].y=0.0415560f*(MagickRealType) i;
z_map[i].z=1.0572252f*(MagickRealType) i;
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=1.3584000f*(MagickRealType) i;
y_map[i].x=0.0000000f;
z_map[i].x=1.8215000f*((MagickRealType) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137)));
x_map[i].y=1.3584000f*(MagickRealType) i;
y_map[i].y=(-0.4302726f)*((MagickRealType) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156)));
z_map[i].y=(-0.9271435f)*((MagickRealType) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(137)));
x_map[i].z=1.3584000f*(MagickRealType) i;
y_map[i].z=2.2179000f*((MagickRealType) i-(MagickRealType)
ScaleQuantumToMap(ScaleCharToQuantum(156)));
z_map[i].z=0.0000000f;
}
break;
}
case YIQColorspace:
{
/*
Initialize YIQ tables:
R = Y+0.95620*I+0.62140*Q
G = Y-0.27270*I-0.64680*Q
B = Y-1.10370*I+1.70060*Q
I and Q, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) i;
y_map[i].x=0.47810f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].x=0.31070f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].y=(MagickRealType) i;
y_map[i].y=(-0.13635f)*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].y=(-0.32340f)*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].z=(MagickRealType) i;
y_map[i].z=(-0.55185f)*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].z=0.85030f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
}
break;
}
case YPbPrColorspace:
{
/*
Initialize YPbPr tables:
R = Y +1.402000*C2
G = Y-0.344136*C1+0.714136*C2
B = Y+1.772000*C1
Pb and Pr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) i;
y_map[i].x=0.000000f;
z_map[i].x=0.701000f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].y=(MagickRealType) i;
y_map[i].y=(-0.172068f)*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].y=0.357068f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].z=(MagickRealType) i;
y_map[i].z=0.88600f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].z=0.00000f;
}
break;
}
case YUVColorspace:
default:
{
/*
Initialize YUV tables:
R = Y +1.13980*V
G = Y-0.39380*U-0.58050*V
B = Y+2.02790*U
U and V, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) i;
y_map[i].x=0.00000f;
z_map[i].x=0.56990f*(2.0000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].y=(MagickRealType) i;
y_map[i].y=(-0.19690f)*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].y=(-0.29025f)*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
x_map[i].z=(MagickRealType) i;
y_map[i].z=1.01395f*(2.00000f*(MagickRealType) i-(MagickRealType)
MaxMap);
z_map[i].z=0.00000f;
}
break;
}
}
/*
Convert to RGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
MagickPixelPacket
pixel;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(q));
green=ScaleQuantumToMap(GetPixelGreen(q));
blue=ScaleQuantumToMap(GetPixelBlue(q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
switch (colorspace)
{
case YCCColorspace:
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*QuantumScale*
pixel.red)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*QuantumScale*
pixel.green)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*QuantumScale*
pixel.blue)];
#endif
break;
}
case sRGBColorspace:
{
if ((QuantumScale*pixel.red) <= 0.0031308)
pixel.red*=12.92f;
else
pixel.red=(MagickRealType) QuantumRange*(1.055*
pow(QuantumScale*pixel.red,(1.0/2.4))-0.055);
if ((QuantumScale*pixel.green) <= 0.0031308)
pixel.green*=12.92f;
else
pixel.green=(MagickRealType) QuantumRange*(1.055*
pow(QuantumScale*pixel.green,(1.0/2.4))-0.055);
if ((QuantumScale*pixel.blue) <= 0.0031308)
pixel.blue*=12.92f;
else
pixel.blue=(MagickRealType) QuantumRange*(1.055*
pow(QuantumScale*pixel.blue,(1.0/2.4))-0.055);
break;
}
default:
break;
}
SetPixelRed(q,ScaleMapToQuantum((MagickRealType) MaxMap*
QuantumScale*pixel.red));
SetPixelGreen(q,ScaleMapToQuantum((MagickRealType) MaxMap*
QuantumScale*pixel.green));
SetPixelBlue(q,ScaleMapToQuantum((MagickRealType) MaxMap*
QuantumScale*pixel.blue));
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransformRGBImage)
#endif
proceed=SetImageProgress(image,TransformRGBImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
MagickPixelPacket
pixel;
register size_t
blue,
green,
red;
red=ScaleQuantumToMap(image->colormap[i].red);
green=ScaleQuantumToMap(image->colormap[i].green);
blue=ScaleQuantumToMap(image->colormap[i].blue);
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
switch (colorspace)
{
case YCCColorspace:
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
image->colormap[i].red=(Quantum) (QuantumRange*YCCMap[
RoundToYCC(1024.0*QuantumScale*pixel.red)]);
image->colormap[i].green=(Quantum) (QuantumRange*YCCMap[
RoundToYCC(1024.0*QuantumScale*pixel.green)]);
image->colormap[i].blue=(Quantum) (QuantumRange*YCCMap[
RoundToYCC(1024.0*QuantumScale*pixel.blue)]);
#endif
break;
}
case sRGBColorspace:
{
if ((QuantumScale*pixel.red) <= 0.0031308)
pixel.red*=12.92f;
else
pixel.red=(MagickRealType) QuantumRange*(1.055*pow(QuantumScale*
pixel.red,(1.0/2.4))-0.055);
if ((QuantumScale*pixel.green) <= 0.0031308)
pixel.green*=12.92f;
else
pixel.green=(MagickRealType) QuantumRange*(1.055*pow(QuantumScale*
pixel.green,(1.0/2.4))-0.055);
if ((QuantumScale*pixel.blue) <= 0.0031308)
pixel.blue*=12.92f;
else
pixel.blue=(MagickRealType) QuantumRange*(1.055*pow(QuantumScale*
pixel.blue,(1.0/2.4))-0.055);
}
default:
{
image->colormap[i].red=ScaleMapToQuantum((MagickRealType) MaxMap*
QuantumScale*pixel.red);
image->colormap[i].green=ScaleMapToQuantum((MagickRealType) MaxMap*
QuantumScale*pixel.green);
image->colormap[i].blue=ScaleMapToQuantum((MagickRealType) MaxMap*
QuantumScale*pixel.blue);
break;
}
}
}
image_view=DestroyCacheView(image_view);
(void) SyncImage(image);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,RGBColorspace) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Parenthesize the result branches as well as the comparison operands:
 * without (a)/(b) in the branches, an argument that is itself a
 * conditional or assignment expression (e.g. MAX(x, y = z)) parses
 * incorrectly.  Note both macros still evaluate their arguments twice,
 * so do not pass expressions with side effects (i++). */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT (result = x - y).
 *
 * Neither input is modified.  The result is normalized so that
 * tv_usec is in [0, 1000000); a negative difference is represented
 * with a negative (or smaller) tv_sec and a non-negative tv_usec.
 *
 * Return 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Work on a local copy of *y: the original algorithm performed the
	 * carry normalization directly on the caller's argument, silently
	 * clobbering it.  The arithmetic below is otherwise identical. */
	struct timeval yy = *y;

	/* Perform the carry for the later subtraction. */
	if (x->tv_usec < yy.tv_usec)
	{
		int nsec = (yy.tv_usec - x->tv_usec) / 1000000 + 1;
		yy.tv_usec -= 1000000 * nsec;
		yy.tv_sec += nsec;
	}
	if (x->tv_usec - yy.tv_usec > 1000000)
	{
		int nsec = (x->tv_usec - yy.tv_usec) / 1000000;
		yy.tv_usec += 1000000 * nsec;
		yy.tv_sec -= nsec;
	}

	/* Compute the time remaining to wait.
	 * tv_usec is certainly positive.
	 */
	result->tv_sec = x->tv_sec - yy.tv_sec;
	result->tv_usec = x->tv_usec - yy.tv_usec;

	/* Return 1 if result is negative. */
	return x->tv_sec < yy.tv_sec;
}
/* Order-1 3D 7-point stencil benchmark driver.
 * argv[1..3]: interior grid size per dimension; argv[4]: time steps.
 * Runs the stencil TESTS times and reports the per-run and best time. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Grid extents (+2 halo points per dimension) and time-step count.
     Defaults keep the run well-defined when arguments are omitted
     (the original left these uninitialized: undefined behavior). */
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Two time levels of an Nz x Ny x Nx grid (pointer-to-pointer layout). */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for (i = 0; i < Nz; i++) {
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for (j = 0; j < Ny; j++) {
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  /* Initialize BOTH time levels over the FULL index range, including the
     boundary planes (index 0 and N-1 in each dimension): the stencil reads
     those planes but never writes them, so the original's [1, N) loop over
     A[0] only left uninitialized reads (undefined behavior). */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* Time TESTS full sweeps and keep the fastest. */
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
              + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
                        A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Bug fix: the original called an undefined lowercase `min`;
       the MIN macro defined at the top of this file is what was intended. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
  for(j=0;j<Ny;j++){
  free(A[0][i][j]);
  free(A[1][i][j]);
  }
  free(A[0][i]);
  free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  */
  return 0;
}
|
averageOfMatrix.c | #include <stdio.h>
#include <omp.h>
/* Reads an n x n integer matrix from stdin and prints its average,
 * accumulating the sum with an OpenMP reduction over each row.
 * Returns 0 on success, 1 on invalid input. */
int main(){
    int i, j, n;
    double sum = 0.0;
    printf("Enter matrix dimension = ");
    /* Bug fix: the original ignored scanf's result and accepted n <= 0,
       which yields an invalid VLA size and a division by zero below. */
    if (scanf("%d", &n) != 1 || n <= 0){
        fprintf(stderr, "Invalid matrix dimension\n");
        return 1;
    }
    int a[n][n];
    printf("Enter matrix values\n");
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            printf("a[%d][%d] = ", i, j);
            if (scanf("%d", &a[i][j]) != 1){
                fprintf(stderr, "Invalid matrix entry\n");
                return 1;
            }
        }
    }
    /* Pin the thread count to the number of processors. */
    omp_set_dynamic(0);
    int m = omp_get_num_procs();
    omp_set_num_threads(m);
    for (i = 0; i < n; i++){
        #pragma omp parallel for reduction(+:sum)
        for (j = 0; j < n; j++){
            sum += a[i][j];
        }
    }
    /* (double) cast avoids int overflow of n*n for large n. */
    printf("\nAverage = %.2f\n", sum / ((double)n * n));
    return 0;
}
GB_unop__ainv_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint64_uint64)
// op(A') function: GB (_unop_tran__ainv_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = -aij entrywise (additive inverse; wraps modulo 2^64 for
// uint64_t).  Writes into Cx, which may alias Ax.  Generated code.
GrB_Info GB (_unop_apply__ainv_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A': transpose A and apply the additive-inverse operator.
// The actual work is in the included GB_unop_transpose.c template,
// specialized via the GB_* macros defined above.  Generated code.
GrB_Info GB (_unop_tran__ainv_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_critical.c | <ompts:test>
<ompts:testdescription>Test which checks the omp critical directive by counting up a variable in a parallelized loop within a critical section.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp critical</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <unistd.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* Sums 0..999 across threads sharing `sum`; when the harness enables the
 * <ompts:check> tag, a critical section serializes the updates.
 * Returns 1 (pass) iff the result equals the closed-form Gauss sum. */
int <ompts:testcode:functionname>omp_critical</ompts:testcode:functionname> (FILE * logFile)
{
<ompts:orphan:vars>
int sum;
</ompts:orphan:vars>
int known_sum;
sum = 0;
#pragma omp parallel
{
<ompts:orphan>
int i;
#pragma omp for
for (i = 0; i < 1000; i++)
{
<ompts:check>#pragma omp critical</ompts:check>
{
sum = sum + i;
} /* end of critical */
} /* end of for */
</ompts:orphan>
} /* end of parallel */
/* Gauss sum of 0..999. */
known_sum = 999 * 1000 / 2;
return (known_sum == sum);
}
</ompts:testcode>
</ompts:test>
|
GB_unop__abs_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_uint16_uint16
// op(A') function: GB_unop_tran__abs_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply ABS entrywise.  For the unsigned uint16_t type ABS is the
// identity (cij = aij), so this is effectively a typed copy.
// Cx may alias Ax.  Generated code.
GrB_Info GB_unop_apply__abs_uint16_uint16
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = abs (A'): transpose with the (identity, for unsigned) ABS op.
// The work happens in the included GB_unop_transpose.c template,
// specialized via the GB_* macros above.  Generated code.
GrB_Info GB_unop_tran__abs_uint16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__isge_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_int32
// A.*B function (eWiseMult): GB_AemultB__isge_int32
// A*D function (colscale): GB_AxD__isge_int32
// D*A function (rowscale): GB_DxB__isge_int32
// C+=B function (dense accum): GB_Cdense_accumB__isge_int32
// C+=b function (dense accum): GB_Cdense_accumb__isge_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_int32
// C=scalar+B GB_bind1st__isge_int32
// C=scalar+B' GB_bind1st_tran__isge_int32
// C=A+scalar GB_bind2nd__isge_int32
// C=A'+scalar GB_bind2nd_tran__isge_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_INT32 || GxB_NO_ISGE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A "+" B where all three matrices are dense; the operator is
// ISGE: cij = (aij >= bij), producing 0/1 int32 values.  Generated code.
GrB_Info GB_Cdense_ewise3_noaccum__isge_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C "+=" B: accumulate a sparse matrix B into a dense C with the ISGE
// operator, over tasks produced by GB_ek_slice.  Generated code.
GrB_Info GB_Cdense_accumB__isge_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C "+=" b: accumulate a scalar into a dense matrix with the ISGE
// operator.  Generated code; the second `return` below is unreachable
// (the template block returns first) but is kept by the generator.
GrB_Info GB_Cdense_accumb__isge_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale by a diagonal matrix D, using ISGE as the
// multiply operator.  Work is in GB_AxB_colscale_meta.c.  Generated code.
GrB_Info GB_AxD__isge_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale by a diagonal matrix D, using ISGE as the
// multiply operator.  Work is in GB_AxB_rowscale_meta.c.  Generated code.
GrB_Info GB_DxB__isge_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the ISGE operator, over the task
// list built by the caller.  The slice arrays are allocated inside the
// template and released via GB_FREE_ALL.  Generated code.
GrB_Info GB_AaddB__isge_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the ISGE operator.  Mirrors
// GB_AaddB above but uses the emult template (intersection of patterns).
// Generated code.
GrB_Info GB_AemultB__isge_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind a scalar x as the FIRST operand, so
// Cx [p] = (x >= Bx [p]) for every present entry of B (Bb is the
// bitmap, or NULL when all entries are present).  Generated code.
GrB_Info GB_bind1st__isge_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind a scalar y as the SECOND operand, so
// Cx [p] = (Ax [p] >= y) for every present entry of A (Ab is the
// bitmap, or NULL when all entries are present).  Generated code.
GrB_Info GB_bind2nd__isge_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x >= aij) via the
// GB_unop_transpose.c template and the GB_CAST_OP macro redefined just
// above.  GB_ATYPE is temporarily redefined because A is the SECOND
// operand here.  Generated code.
GrB_Info GB_bind1st_tran__isge_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij >= y) via the
// GB_unop_transpose.c template and the GB_CAST_OP macro redefined just
// above.  Generated code.
GrB_Info GB_bind2nd_tran__isge_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
480d1a_prot_ac_so4.c | #define _POSIX_C_SOURCE 200809L
#define START_TIMER(S) \
struct timeval start_##S, end_##S; \
gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
gettimeofday(&end_##S, NULL); \
T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads);
/* Devito-generated forward propagator: advances the wavefield u over time
 * steps [time_m, time_M], running the blocked stencil (bf0, section0) and
 * then injecting the point sources (section1) each step.
 * Per-section wall time accumulates into *timers.  Always returns 0. */
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, const int x0_blk0_size, const int y0_blk0_size, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
/* Typed, aligned array views over the raw dataobj buffers. */
float(*restrict src)[src_vec->size[1]] __attribute__((aligned(64))) = (float(*)[src_vec->size[1]])src_vec->data;
float(*restrict src_coords)[src_coords_vec->size[1]] __attribute__((aligned(64))) = (float(*)[src_coords_vec->size[1]])src_coords_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
/* t0/t1/t2 cycle through the 3 stored time levels (current/previous/next). */
for (int time = time_m, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= time_M; time += 1, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3))
{
/* Begin section0 */
START_TIMER(section0)
/* Four bf0 calls cover full blocks, the y remainder, the x remainder,
   and the corner remainder of the blocked x/y iteration space. */
bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
STOP_TIMER(section0, timers)
/* End section0 */
/* Begin section1 */
START_TIMER(section1)
/* Source injection: each point source is spread onto the 8 grid nodes
   surrounding its physical position; the guarded atomic updates keep
   concurrently-injected sources race-free.
   NOTE(review): the 6.66667e-2 / 1.5e+1 constants presumably encode a
   15-unit grid spacing — confirm against the generating Devito model. */
#pragma omp parallel num_threads(nthreads_nonaffine)
{
int chunk_size = (int)(fmax(1, (1.0F / 3.0F) * (p_src_M - p_src_m + 1) / nthreads_nonaffine));
#pragma omp for collapse(1) schedule(dynamic, chunk_size)
for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
{
float posx = -o_x + src_coords[p_src][0];
float posy = -o_y + src_coords[p_src][1];
float posz = -o_z + src_coords[p_src][2];
int ii_src_0 = (int)(floor(6.66667e-2 * posx));
int ii_src_1 = (int)(floor(6.66667e-2 * posy));
int ii_src_2 = (int)(floor(6.66667e-2 * posz));
int ii_src_3 = (int)(floor(6.66667e-2 * posz)) + 1;
int ii_src_4 = (int)(floor(6.66667e-2 * posy)) + 1;
int ii_src_5 = (int)(floor(6.66667e-2 * posx)) + 1;
float px = (float)(posx - 1.5e+1F * (int)(floor(6.66667e-2F * posx)));
float py = (float)(posy - 1.5e+1F * (int)(floor(6.66667e-2F * posy)));
float pz = (float)(posz - 1.5e+1F * (int)(floor(6.66667e-2F * posz)));
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r0 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] * vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * px * py + 4.44445e-3F * px * pz - 6.66667e-2F * px + 4.44445e-3F * py * pz - 6.66667e-2F * py - 6.66667e-2F * pz + 1) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_2 + 4] += r0;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r1 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] * vp[ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4]) * (2.96296e-4F * px * py * pz - 4.44445e-3F * px * pz - 4.44445e-3F * py * pz + 6.66667e-2F * pz) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_1 + 4][ii_src_3 + 4] += r1;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r2 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] * vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4]) * (2.96296e-4F * px * py * pz - 4.44445e-3F * px * py - 4.44445e-3F * py * pz + 6.66667e-2F * py) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_2 + 4] += r2;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r3 = (dt * dt) * (vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] * vp[ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * py * pz) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_0 + 4][ii_src_4 + 4][ii_src_3 + 4] += r3;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r4 = (dt * dt) * (vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] * vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4]) * (2.96296e-4F * px * py * pz - 4.44445e-3F * px * py - 4.44445e-3F * px * pz + 6.66667e-2F * px) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_2 + 4] += r4;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r5 = (dt * dt) * (vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] * vp[ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * px * pz) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_1 + 4][ii_src_3 + 4] += r5;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r6 = (dt * dt) * (vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] * vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4]) * (-2.96296e-4F * px * py * pz + 4.44445e-3F * px * py) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_2 + 4] += r6;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r7 = 2.96296e-4F * px * py * pz * (dt * dt) * (vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] * vp[ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4]) * src[time][p_src];
#pragma omp atomic update
u[t2][ii_src_5 + 4][ii_src_4 + 4][ii_src_3 + 4] += r7;
}
}
}
STOP_TIMER(section1, timers)
/* End section1 */
}
return 0;
}
/* Devito-generated blocked stencil kernel: one wave-equation update of
 * u[t2] from u[t0]/u[t1] over the x/y tile [x_m..x_M] x [y_m..y_M]
 * (tiles of x0_blk0_size x y0_blk0_size) and the full z range.
 * The +4 index offsets skip the halo layers of u and vp. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
/* Typed, aligned array views over the raw dataobj buffers. */
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
/* Empty remainder region: nothing to do. */
if (x0_blk0_size == 0 || y0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= x0_blk0 + x0_blk0_size - 1; x += 1)
{
for (int y = y0_blk0; y <= y0_blk0 + y0_blk0_size - 1; y += 1)
{
/* Innermost z loop vectorized; r13-r15 are loop-invariant
   reciprocals (1/vp^2, 1/dt^2, 1/dt) hoisted by the generator. */
#pragma omp simd aligned(damp, u, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r15 = 1.0 / dt;
float r14 = 1.0 / (dt * dt);
float r13 = 1.0 / (vp[x + 4][y + 4][z + 4] * vp[x + 4][y + 4][z + 4]);
u[t2][x + 4][y + 4][z + 4] = (r13 * (-r14 * (-2.0F * u[t0][x + 4][y + 4][z + 4] + u[t1][x + 4][y + 4][z + 4])) + r15 * (damp[x + 1][y + 1][z + 1] * u[t0][x + 4][y + 4][z + 4]) - 3.70370379e-4F * (u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 5.92592607e-3F * (u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 3.33333341e-2F * u[t0][x + 4][y + 4][z + 4]) / (r13 * r14 + r15 * damp[x + 1][y + 1][z + 1]);
}
}
}
}
}
}
}
|
is.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
//#if defined(_OPENMP)
//#include <omp.h>
//#endif /* _OPENMP */
/*****************************************************************/
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/*****************************************************************/
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/******************/
/* default values */
/******************/
#ifndef CLASS
#define CLASS 'S'
#endif
/*************/
/* CLASS S */
/*************/
#if CLASS == 'S'
#define TOTAL_KEYS_LOG_2 16
#define MAX_KEY_LOG_2 11
#define NUM_BUCKETS_LOG_2 9
#endif
/*************/
/* CLASS W */
/*************/
#if CLASS == 'W'
#define TOTAL_KEYS_LOG_2 20
#define MAX_KEY_LOG_2 16
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS A */
/*************/
#if CLASS == 'A'
#define TOTAL_KEYS_LOG_2 23
#define MAX_KEY_LOG_2 19
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS B */
/*************/
#if CLASS == 'B'
#define TOTAL_KEYS_LOG_2 25
#define MAX_KEY_LOG_2 21
#define NUM_BUCKETS_LOG_2 10
#endif
/*************/
/* CLASS C */
/*************/
#if CLASS == 'C'
#define TOTAL_KEYS_LOG_2 27
#define MAX_KEY_LOG_2 23
#define NUM_BUCKETS_LOG_2 10
#endif
#define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2)
#define MAX_KEY (1 << MAX_KEY_LOG_2)
#define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2)
#define NUM_KEYS TOTAL_KEYS
#define SIZE_OF_BUFFERS NUM_KEYS
#define MAX_ITERATIONS 10
#define TEST_ARRAY_SIZE 5
/*************************************/
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/*************************************/
#include <omp.h>
typedef int INT_TYPE;
/********************/
/* Some global info */
/********************/
/* used by full_verify to get */
INT_TYPE *key_buff_ptr_global;
/* copies of rank info */
int passed_verification;
/************************************/
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/************************************/
INT_TYPE key_array[8388608];
INT_TYPE key_buff1[8388608];
INT_TYPE key_buff2[8388608];
INT_TYPE partial_verify_vals[5];
#ifdef USE_BUCKETS
#endif
/**********************/
/* Partial verif info */
/**********************/
INT_TYPE test_index_array[5];
INT_TYPE test_rank_array[5];
INT_TYPE S_test_index_array[5] = {(48427), (17148), (23627), (62548), (4431)};
INT_TYPE S_test_rank_array[5] = {(0), (18), (346), (64917), (65463)};
INT_TYPE W_test_index_array[5] = {(357773), (934767), (875723), (898999), (404505)};
INT_TYPE W_test_rank_array[5] = {(1249), (11698), (1039987), (1043896), (1048018)};
INT_TYPE A_test_index_array[5] = {(2112377), (662041), (5336171), (3642833), (4250760)};
INT_TYPE A_test_rank_array[5] = {(104), (17523), (123928), (8288932), (8388264)};
INT_TYPE B_test_index_array[5] = {(41869), (812306), (5102857), (18232239), (26860214)};
INT_TYPE B_test_rank_array[5] = {(33422937), (10244), (59149), (33135281), (99)};
INT_TYPE C_test_index_array[5] = {(44172927), (72999161), (74326391), (129606274), (21736814)};
INT_TYPE C_test_rank_array[5] = {(61147), (882988), (266290), (133997595), (133525895)};
/***********************/
/* function prototypes */
/***********************/
double randlc(double *X,double *A);
void full_verify();
/*
* FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/*
 * randlc: portable linear congruential generator, x_{k+1} = a*x_k (mod 2^46).
 * Updates *X to the new seed and returns the value normalized to (0,1).
 * The arithmetic is deliberately split into 23-bit halves so every
 * intermediate fits exactly in a double with >= 48 mantissa bits; the exact
 * statement order below is what makes the sequence bit-reproducible across
 * machines (see the header comment above) -- do not reorder.
 */
double randlc(X,A)
double *X;
double *A;
{
    /* One-time constants: R23 = 2^-23, R46 = 2^-46, T23 = 2^23, T46 = 2^46,
       built by repeated multiplication so they are exact on all systems. */
    static int KS = 0;
    static double R23;
    static double R46;
    static double T23;
    static double T46;
    double T1;
    double T2;
    double T3;
    double T4;
    double A1;
    double A2;
    double X1;
    double X2;
    double Z;
    int i;
    int j;
    if (KS == 0) {
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;
        /* Each step multiplies by an exact power of two, so the parallel
           reduction order cannot change the result. */
        #pragma omp parallel for private (i) reduction (*:R23,T23)
        for (i = 1; i <= 23; i += 1) {
            R23 = 0.50 * R23;
            T23 = 2.0 * T23;
        }
        #pragma omp parallel for private (i) reduction (*:R46,T46)
        for (i = 1; i <= 46; i += 1) {
            R46 = 0.50 * R46;
            T46 = 2.0 * T46;
        }
        KS = 1;
    }
    /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */
    T1 = R23 * *A;
    j = T1;             /* double->int truncation implements floor for A > 0 */
    A1 = j;
    A2 = *A - T23 * A1;
    /* Break X into two parts such that X = 2^23 * X1 + X2, compute
       Z = A1 * X2 + A2 * X1 (mod 2^23), and then
       X = 2^23 * Z + A2 * X2 (mod 2^46). */
    T1 = R23 * *X;
    j = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    T1 = A1 * X2 + A2 * X1;
    j = (R23 * T1);
    T2 = j;
    Z = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j = (R46 * T3);
    T4 = j;
    *X = T3 - T46 * T4;
    /* Normalize the new 46-bit seed into (0,1). */
    return R46 * *X;
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/*
 * create_seq: fill key_array with TOTAL_KEYS pseudorandom keys in
 * [0, MAX_KEY) using the randlc() generator.
 *
 *   seed -- initial RNG seed (odd integer-valued double in (1, 2^46))
 *   a    -- RNG multiplier, same constraints as seed
 *
 * Fix: removed the unused local `j`.
 */
void create_seq(double seed,double a)
{
    double x;   /* sum of four consecutive draws, uniform-ish in (0,4) */
    int i;
    int k;      /* scale factor MAX_KEY/4, so k*x lands in [0, MAX_KEY) */
    k = (1 << 19) / 4;
    for (i = 0; i <= 8388607; i += 1) {
        /* Sum four draws to smooth the distribution, then scale to a key. */
        x = randlc(&seed,&a);
        x += randlc(&seed,&a);
        x += randlc(&seed,&a);
        x += randlc(&seed,&a);
        key_array[i] = (k * x);
    }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/*
 * full_verify: place every key into its final sorted position using the rank
 * table published by rank() (key_buff_ptr_global), then scan the result and
 * either report the number of out-of-order keys or bump passed_verification.
 *
 * Fix: removed the unused locals `k`, `m`, and `unique_keys`.
 */
void full_verify()
{
    INT_TYPE i;
    INT_TYPE j;
    /* Counting-sort placement: decrementing the rank entry for each key value
       yields that key's final index.  Must run sequentially -- each decrement
       depends on the previous one for equal keys. */
    for (i = 0; i <= 8388607; i += 1) {
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
    }
    /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
    j = 0;
    #pragma omp parallel for private (i) reduction (+:j)
    for (i = 1; i <= 8388607; i += 1) {
        if (key_array[i - 1] > key_array[i])
            j++;
    }
    if (j != 0) {
        printf("Full_verify: number of keys out of sort: %d\n",j);
    }
    else
        passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/*
 * rank: one ranking iteration of the IS benchmark -- histogram all keys,
 * prefix-sum the histogram into a rank table, run the partial verification
 * tests, and on the last iteration publish the table for full_verify().
 * This is generated code specialized for CLASS 'A' (2^23 keys, MAX_KEY 2^19);
 * several locals below are unused in this specialization.
 */
void rank(int iteration)
{
    INT_TYPE i;
    INT_TYPE j;
    INT_TYPE k;
    INT_TYPE l;
    INT_TYPE m;
    INT_TYPE shift = 19 - 10;
    INT_TYPE key;
    INT_TYPE min_key_val;
    INT_TYPE max_key_val;
    /* Private histogram: 2^19 counters = 2 MiB of automatic storage.
       NOTE(review): may need stack-size headroom (ulimit -s). */
    INT_TYPE prv_buff1[524288];
    {
        /* Perturb two keys so each iteration ranks a distinct sequence. */
        key_array[iteration] = iteration;
        key_array[iteration + 10] = (1 << 19) - iteration;
        /* Determine where the partial verify test keys are, load into */
        /* top of array bucket_size */
        #pragma omp parallel for private (i)
        for (i = 0; i <= 4; i += 1) {
            partial_verify_vals[i] = key_array[test_index_array[i]];
        }
        /* Clear the work array */
        #pragma omp parallel for private (i)
        for (i = 0; i <= 524287; i += 1) {
            key_buff1[i] = 0;
        }
    }
    #pragma omp parallel for private (i)
    for (i = 0; i <= 524287; i += 1) {
        prv_buff1[i] = 0;
    }
    /* Copy keys into work array; keys in key_array will be reused each iter. */
    for (i = 0; i <= 8388607; i += 1) {
        key_buff2[i] = key_array[i];
        /* Ranking of all keys occurs in this section: */
        /* In this section, the keys themselves are used as their
           own indexes to determine how many of each there are: their
           individual population */
        /* Now they have individual key */
        prv_buff1[key_buff2[i]]++;
    }
    /* population */
    /* Prefix sum: prv_buff1[v] becomes the number of keys <= v. */
    for (i = 0; i <= 524286; i += 1) {
        prv_buff1[i + 1] += prv_buff1[i];
    }
    {
        /* Merge the private histogram into the shared rank table. */
        #pragma omp parallel for private (i)
        for (i = 0; i <= 524287; i += 1) {
            key_buff1[i] += prv_buff1[i];
        }
    }
    /* To obtain ranks of each key, successively add the individual key
       population, not forgetting to add m, the total of lesser keys,
       to the first key population */
    {
        /* This is the partial verify test section */
        /* Observe that test_rank_array vals are */
        /* shifted differently for different cases */
        for (i = 0; i <= 4; i += 1) {
            /* test vals were put here */
            k = partial_verify_vals[i];
            /* Only test keys inside the valid key range. */
            if (0 <= k && k <= (1 << 23) - 1)
                /* Switch on the constant class 'A': only case 'A' runs;
                   the other cases are kept by the generator for reference. */
                switch('A'){
                case 'S':
                    if (i <= 2) {
                        if (key_buff1[k - 1] != test_rank_array[i] + iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    else {
                        if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if (i < 2) {
                        if (key_buff1[k - 1] != test_rank_array[i] + (iteration - 2)) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    else {
                        if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if (i <= 2) {
                        if (key_buff1[k - 1] != test_rank_array[i] + (iteration - 1)) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    else {
                        if (key_buff1[k - 1] != test_rank_array[i] - (iteration - 1)) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if (i == 1 || i == 2 || i == 4) {
                        if (key_buff1[k - 1] != test_rank_array[i] + iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    else {
                        if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if (i <= 2) {
                        if (key_buff1[k - 1] != test_rank_array[i] + iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    else {
                        if (key_buff1[k - 1] != test_rank_array[i] - iteration) {
                            printf("Failed partial verification: iteration %d, test key %d\n",iteration,i);
                        }
                        else
                            passed_verification++;
                    }
                    break;
                }
        }
        /* Make copies of rank info for use by full_verify: these variables
           in rank are local; making them global slows down the code, probably
           since they cannot be made register by compiler */
        if (iteration == 10)
            key_buff_ptr_global = key_buff1;
        /* end master */
    }
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/*
 * main: drive the IS benchmark -- select the CLASS 'A' verification tables,
 * generate the key sequence, run one untimed warm-up ranking, time
 * MAX_ITERATIONS rankings, fully verify the sort, and print the results.
 *
 * Fixes: converted the K&R parameter list to an ANSI prototype (consistent
 * with the other definitions in this file), removed the unused locals
 * `itemp` and `maxtime`, a stray empty statement, and dead commented-out
 * code, and added the missing `return` value.
 */
int main(int argc, char **argv)
{
    int i;
    int iteration;
    int nthreads = 1;      /* reported in the results; the OpenMP thread query
                              was disabled in this version */
    double timecounter;
    /* Initialize the verification arrays if a valid class.  The switch is
       over the constant class 'A', so only that case is taken. */
    #pragma omp parallel for private (i)
    for (i = 0; i <= 4; i += 1) {
        switch('A'){
        case 'S':
            test_index_array[i] = S_test_index_array[i];
            test_rank_array[i] = S_test_rank_array[i];
            break;
        case 'A':
            test_index_array[i] = A_test_index_array[i];
            test_rank_array[i] = A_test_rank_array[i];
            break;
        case 'W':
            test_index_array[i] = W_test_index_array[i];
            test_rank_array[i] = W_test_rank_array[i];
            break;
        case 'B':
            test_index_array[i] = B_test_index_array[i];
            test_rank_array[i] = B_test_rank_array[i];
            break;
        case 'C':
            test_index_array[i] = C_test_index_array[i];
            test_rank_array[i] = C_test_rank_array[i];
            break;
        }
    }
    /* Printout initial NPB info */
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version - IS Benchmark\n\n");
    printf(" Size: %d (class %c)\n",1 << 23,'A');
    printf(" Iterations: %d\n",10);
    /* Initialize timer */
    timer_clear(0);
    /* Generate random number sequence and subsequent keys on all procs */
    /* Random number gen seed and multiplier */
    create_seq(314159265.00,1220703125.00);
    /* Do one iteration for free (i.e., untimed) to guarantee initialization of
       all data and code pages and respective tables */
    rank(1);
    /* Start verification counter */
    passed_verification = 0;
    if ('A' != 'S')
        printf("\n iteration\n");
    /* Start timer */
    timer_start(0);
    /* This is the main iteration */
    for (iteration = 1; iteration <= 10; iteration += 1) {
        if ('A' != 'S')
            printf(" %d\n",iteration);
        rank(iteration);
    }
    /* End of timing, obtain maximum time of all processors */
    timer_stop(0);
    timecounter = (timer_read(0));
    /* This tests that keys are in sequence: sorting of last ranked key seq
       occurs here, but is an untimed operation */
    full_verify();
    /* The final printout: expect 10 iterations x 5 partial tests + 1 full
       verification; anything else is reported as failure. */
    if (passed_verification != 5 * 10 + 1)
        passed_verification = 0;
    c_print_results("IS",'A',1 << 23,0,0,10,nthreads,timecounter,((double )(10 * (1 << 23))) / timecounter / 1000000.,"keys ranked",passed_verification,"3.0 structured","14 Jan 2020","(none)","(none)","-lm","(none)","(none)","(none)","randlc");
    return 0;
    /**************************/
    /* E N D P R O G R A M */
}
/**************************/
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% John Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class is used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ``fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ``classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/segment.h"
#include "magick/string_.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
typedef struct _ExtentPacket
{
MagickRealType
center;
ssize_t
index,
left,
right;
} ExtentPacket;
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
typedef struct _IntervalTree
{
MagickRealType
tau;
ssize_t
left,
right;
MagickRealType
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
typedef struct _ZeroCrossing
{
MagickRealType
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static MagickRealType
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const MagickRealType cluster_threshold,
% const MagickRealType weighting_exponent,
% const MagickBooleanType verbose)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
*/
/*
  Classify each pixel of the image: first threshold it against the extents
  of the clusters derived from the histogram extrema; any pixel matching no
  cluster is assigned by maximizing its fuzzy c-means membership.  On return
  the image is colormapped, with one colormap entry per surviving cluster
  (the mean color of the cluster's pixels).
*/
static MagickBooleanType Classify(Image *image,short **extrema,
  const MagickRealType cluster_threshold,
  const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"

  CacheView
    *image_view;

  Cluster
    *cluster,
    *head,
    *last_cluster,
    *next_cluster;

  ExceptionInfo
    *exception;

  ExtentPacket
    blue,
    green,
    red;

  MagickOffsetType
    progress;

  MagickRealType
    *free_squares;

  MagickStatusType
    status;

  register ssize_t
    i;

  register MagickRealType
    *squares;

  size_t
    number_clusters;

  ssize_t
    count,
    y;

  /*
    Form clusters: every combination of one red, one green and one blue
    histogram peak interval becomes a candidate cluster.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  status=MagickTrue;
  count=0;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Assign the pixel to the first cluster whose extents (padded by
         SafeMargin on each side) contain all three channel values. */
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p)) >=
            (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p)) <=
            (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetGreenPixelComponent(p)) >=
            (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetGreenPixelComponent(p)) <=
            (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetBluePixelComponent(p)) >=
            (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetBluePixelComponent(p)) <=
            (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetRedPixelComponent(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetGreenPixelComponent(p));
            cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetBluePixelComponent(p));
            cluster->count++;
            break;
          }
      p++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  /* NOTE(review): `count` (the total pixels tallied above) is reset here, so
     the threshold below compares each cluster against the number of clusters
     kept so far rather than the total pixel count -- looks suspicious;
     confirm against upstream ImageMagick before changing. */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert the accumulated channel sums into the
          mean color and assign the next sequential id.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  number_clusters=(size_t) count;
  if (verbose != MagickFalse)
    {
      /*
        Print cluster statistics.
      */
      (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
      (void) FormatLocaleFile(stdout,"===================\n\n");
      (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
        cluster_threshold);
      (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
        weighting_exponent);
      (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
        (double) number_clusters);
      /*
        Print the total number of points per cluster.
      */
      (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
      (void) FormatLocaleFile(stdout,"=============================\n\n");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
          cluster->id,(double) cluster->count);
      /*
        Print the cluster extents.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,
          "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
          cluster->red.left,(double) cluster->red.right,(double)
          cluster->green.left,(double) cluster->green.right,(double)
          cluster->blue.left,(double) cluster->blue.right);
      }
      /*
        Print the cluster center values.
      */
      (void) FormatLocaleFile(stdout,
        "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
      (void) FormatLocaleFile(stdout,"=====================");
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
          cluster->id);
        (void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
          cluster->red.center,(double) cluster->green.center,(double)
          cluster->blue.center);
      }
      (void) FormatLocaleFile(stdout,"\n");
    }
  if (number_clusters > 256)
    ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
  /*
    Speed up distance calculations: precompute i*i for i in [-255,255].
    `squares` is biased by 255 so it can be indexed directly with signed
    channel differences.
  */
  squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(MagickRealType) i*(MagickRealType) i;
  /*
    Allocate image colormap: one entry per cluster, set to the cluster's
    mean color.
  */
  if (AcquireImageColormap(image,number_clusters) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse-grain classification (rows are processed in parallel).
  */
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *cluster;   /* intentionally shadows the outer `cluster`; per-row */

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetIndexPixelComponent(indexes+x,0);
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
      {
        if (((ssize_t) ScaleQuantumToChar(q->red) >=
            (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->red) <=
            (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) >=
            (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->green) <=
            (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) >=
            (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(q->blue) <=
            (cluster->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetIndexPixelComponent(indexes+x,cluster->id);
            break;
          }
      }
      if (cluster == (Cluster *) NULL)
        {
          MagickRealType
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: no cluster extent matched, so assign
            the class that maximizes the fuzzy c-means membership 1/sum.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
              (ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->green)-
              (ssize_t) ScaleQuantumToChar(GetGreenPixelComponent(p))]+
              squares[(ssize_t) ScaleQuantumToChar(q->blue)-
              (ssize_t) ScaleQuantumToChar(GetBluePixelComponent(p))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
                (ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->green)-
                (ssize_t) ScaleQuantumToChar(GetGreenPixelComponent(p))]+
                squares[(ssize_t) ScaleQuantumToChar(q->blue)-
                (ssize_t) ScaleQuantumToChar(GetBluePixelComponent(p))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
                */
                local_minima=1.0/sum;
                SetIndexPixelComponent(indexes+x,j);
              }
          }
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_Classify)
#endif
        proceed=SetImageProgress(image,SegmentImageTag,progress++,
          2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status&=SyncImage(image);
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  squares-=255;   /* undo the bias before freeing the table */
  free_squares=squares;
  free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
/*
  Return the absolute value of x.
*/
static inline ssize_t MagickAbsoluteValue(const ssize_t x)
{
  return(x >= 0 ? x : -x);
}
/*
  Return the larger of x and y.
*/
static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  return(x < y ? y : x);
}
/*
  Return the smaller of two signed sizes.
*/
static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  return(x < y ? x : y);
}
/*
  Consolidate the zero-crossing lists across scales so that the scale-space
  fingerprints form lines rather than loops: each crossing at scale i is
  moved, if necessary, to a bin that keeps an even number of finer-scale
  (i+1) crossings between adjacent crossings.
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,   /* candidate bin: j itself */
    correct,  /* chosen destination bin, or -1 if none qualifies */
    count,
    left,     /* candidate bin: nearest finer-scale crossing left of j */
    right;    /* candidate bin: nearest finer-scale crossing right of j */

  /*
    Consolidate zero crossings, walking from the coarsest scale downward.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at the same scale i).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      /* k can only be negative when j == 0 (loop body never ran) */
      if (k < 0)
        k=0;
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing value from bin j to the chosen bin (drop it
        entirely when no destination preserves the evenness property).
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
%    o extents:  This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
/*
  Locate the next peak region in the extrema array, scanning from
  extents->index: the left boundary is the first positive entry (maxima),
  the right boundary ends just before the first negative entry (minima).
  Returns MagickTrue when a region was found, MagickFalse when the
  histogram is exhausted.
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values covering the whole histogram.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima): first strictly positive entry.
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima): advance to the first negative entry.
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const MagickRealType *histogram,
% MagickRealType *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of MagickRealTypes is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
/*
  Compute the derivative of a 256-bin histogram: second-order one-sided
  estimates at the two endpoints, central differences in the interior.
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i;

  /*
    Endpoints via second order polynomial interpolation.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
  /*
    Interior via central differencing.
  */
  for (i=1; i < 255; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% MagickPixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDynamicThreshold() returns the dynamic threshold for an image:
  each channel histogram is smoothed at its optimal tau, clusters are
  built from the resulting extrema triples, clusters below the cluster
  threshold are discarded, and *pixel receives the midpoint between the
  background (largest) and object (smallest) cluster centers.

  Returns MagickTrue on success; MagickFalse on allocation failure (an
  exception is raised in that case and all temporaries are released).
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /* fix: was sizeof(**histogram), overallocating the short array */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Free the current, possibly half-allocated slot as well as all
          earlier slots (RelinquishMagickMemory() accepts NULL); the
          original started at i-1 and leaked the surviving buffer of
          slot i.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) extrema-region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Release the partially-built cluster list and the histogram
              buffers; the original leaked both on this error path.
            */
            for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified -- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Free the histogram buffers before bailing out; the original
            leaked them here.
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster: each pixel is assigned to the first
    cluster whose (margin-expanded) RGB extents contain it.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetGreenPixelComponent(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetGreenPixelComponent(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetBluePixelComponent(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetBluePixelComponent(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetRedPixelComponent(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetGreenPixelComponent(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetBluePixelComponent(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.

    NOTE(review): count is reset here, so the threshold below compares each
    cluster against the running tally of *retained* clusters rather than
    the total pixel count; this matches the historical behavior and is
    preserved as-is -- confirm against upstream before changing.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums to mean centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    The object is the smallest surviving cluster, the background the
    largest; with a single cluster both are the head.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  /*
    The dynamic threshold is the midpoint of the two cluster centers.
  */
  threshold=(background->red.center+object->red.center)/2.0;
  pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->green.center+object->green.center)/2.0;
  pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->blue.center+object->blue.center)/2.0;
  pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
/*
  Build the 256-bin red/green/blue histograms of an image; counting stops
  early if a pixel row cannot be read.
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    bin,
    x;

  ssize_t
    y;

  /*
    Clear every bin of each channel histogram.
  */
  for (bin=0; bin <= 255; bin++)
  {
    histogram[Red][bin]=0;
    histogram[Green][bin]=0;
    histogram[Blue][bin]=0;
  }
  /*
    Bin each pixel of each row, scaled to 8 bits per channel.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetRedPixelComponent(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(
        GetGreenPixelComponent(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(
        GetBluePixelComponent(p))]++;
      p++;
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
/*
  Append every leaf (childless) node of the interval tree to list,
  visiting siblings and children recursively.
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
/*
  Set each node's mean_stability to the average stability of its
  immediate children (0.0 for leaves), recursing over the whole tree.
*/
static void MeanStability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      MagickRealType
        sum;

      register IntervalTree
        *child;

      ssize_t
        count;

      sum=0.0;
      count=0;
      for (child=node->child; child != (IntervalTree *) NULL;
           child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
/*
  Set each node's stability to the tau difference between itself and its
  first child (0.0 for leaves), recursing over the whole tree.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
/*
  InitializeIntervalTree() builds an interval tree from the lists of zero
  crossings: each pass subdivides the current leaf intervals at the
  crossings of the next-finer scale.  Returns the root node covering
  [0,255], or NULL on allocation failure.
*/
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireMagickMemory(sizeof(*root));
  if (root == (IntervalTree *) NULL)
    {
      /* fix: the original dereferenced a NULL root on OOM and leaked list */
      list=(IntervalTree **) RelinquishMagickMemory(list);
      return((IntervalTree *) NULL);
    }
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split each leaf interval at the crossings of scale i+1.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              NOTE(review): these node allocations are unchecked; an OOM
              here dereferences NULL.  A complete fix requires
              partial-tree cleanup and is left for a follow-up.
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      if (left != head->left)
        {
          /* close the final sub-interval up to the parent's right edge */
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a node's tau and its child's.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
/*
  Collect the "active" nodes of the interval tree: a node whose stability
  meets or exceeds its children's mean stability is recorded and its
  subtree pruned (siblings are still visited); otherwise the search
  descends into both siblings and children.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
/*
  Release an interval (sub)tree: siblings and children first, then the
  node itself.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node != (IntervalTree *) NULL)
    {
      FreeNodes(node->sibling);
      FreeNodes(node->child);
      (void) RelinquishMagickMemory(node);
    }
}
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
const double min_tau,const double delta_tau,const double smooth_threshold,
short *extrema)
{
IntervalTree
**list,
*node,
*root;
MagickBooleanType
peak;
MagickRealType
average_tau,
*derivative,
*second_derivative,
tau,
value;
register ssize_t
i,
x;
size_t
count,
number_crossings;
ssize_t
index,
j,
k,
number_nodes;
ZeroCrossing
*zero_crossing;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return(0.0);
/*
Allocate zero crossing list.
*/
count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
sizeof(*zero_crossing));
if (zero_crossing == (ZeroCrossing *) NULL)
return(0.0);
for (i=0; i < (ssize_t) count; i++)
zero_crossing[i].tau=(-1.0);
/*
Initialize zero crossing list.
*/
derivative=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*derivative));
second_derivative=(MagickRealType *) AcquireQuantumMemory(256,
sizeof(*second_derivative));
if ((derivative == (MagickRealType *) NULL) ||
(second_derivative == (MagickRealType *) NULL))
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDerivatives");
i=0;
for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
{
zero_crossing[i].tau=tau;
ScaleSpace(histogram,tau,zero_crossing[i].histogram);
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
i++;
}
/*
Add an entry for the original histogram.
*/
zero_crossing[i].tau=0.0;
for (j=0; j <= 255; j++)
zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
number_crossings=(size_t) i;
derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
second_derivative=(MagickRealType *)
RelinquishMagickMemory(second_derivative);
/*
Ensure the scale-space fingerprints form lines in scale-space, not loops.
*/
ConsolidateCrossings(zero_crossing,number_crossings);
/*
Force endpoints to be included in the interval.
*/
for (i=0; i <= (ssize_t) number_crossings; i++)
{
for (j=0; j < 255; j++)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
for (j=255; j > 0; j--)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
}
/*
Initialize interval tree.
*/
root=InitializeIntervalTree(zero_crossing,number_crossings);
if (root == (IntervalTree *) NULL)
return(0.0);
/*
Find active nodes: stability is greater (or equal) to the mean stability of
its children.
*/
number_nodes=0;
ActiveNodes(list,&number_nodes,root->child);
/*
Initialize extrema.
*/
for (i=0; i <= 255; i++)
extrema[i]=0;
for (i=0; i < number_nodes; i++)
{
/*
Find this tau in zero crossings list.
*/
k=0;
node=list[i];
for (j=0; j <= (ssize_t) number_crossings; j++)
if (zero_crossing[j].tau == node->tau)
k=j;
/*
Find the value of the peak.
*/
peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
MagickFalse;
index=node->left;
value=zero_crossing[k].histogram[index];
for (x=node->left; x <= node->right; x++)
{
if (peak != MagickFalse)
{
if (zero_crossing[k].histogram[x] > value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
else
if (zero_crossing[k].histogram[x] < value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
for (x=node->left; x <= node->right; x++)
{
if (index == 0)
index=256;
if (peak != MagickFalse)
extrema[x]=(short) index;
else
extrema[x]=(short) (-index);
}
}
/*
Determine the average tau.
*/
average_tau=0.0;
for (i=0; i < number_nodes; i++)
average_tau+=list[i]->tau;
average_tau/=(MagickRealType) number_nodes;
/*
Relinquish resources.
*/
FreeNodes(root);
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
% MagickRealType *scale_histogram)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
*/
/*
  Apply a Gaussian scale-space filter (standard deviation tau) to a
  256-bin histogram, writing the smoothed result to scale_histogram.
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  MagickRealType
    alpha,
    beta,
    *gamma,
    sum;

  register ssize_t
    u,
    x;

  gamma=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*gamma));
  if (gamma == (MagickRealType *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));
  beta=(-1.0/(2.0*tau*tau));
  /*
    Build the Gaussian kernel; once a term underflows past MagickEpsilon
    the remaining entries are left at zero.
  */
  x=0;
  while (x <= 255)
  {
    gamma[x]=exp((double) beta*x*x);
    if (gamma[x++] < MagickEpsilon)
      break;
  }
  while (x <= 255)
    gamma[x++]=0.0;
  /*
    Convolve the histogram with the kernel.
  */
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(MagickRealType) histogram[u]*gamma[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=alpha*sum;
  }
  gamma=(MagickRealType *) RelinquishMagickMemory(gamma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
*/
/*
  SegmentImage() segments an image by analyzing the histograms of the
  color components and identifying units that are homogeneous with the
  fuzzy C-means technique.  The image is converted to the requested
  analysis colorspace and restored to RGB afterwards.  Returns the
  Classify() status, or MagickFalse on allocation failure.
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Free the current, possibly half-allocated slot too
          (RelinquishMagickMemory() accepts NULL); the original started
          at i-1 and leaked the surviving buffer of slot i.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  if (colorspace != RGBColorspace)
    (void) TransformImageColorspace(image,colorspace);
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  /*
    fix: restore the image to RGB; the original re-requested the analysis
    colorspace (a no-op, since the image was already in it) and left the
    image un-restored.
  */
  if (colorspace != RGBColorspace)
    (void) TransformImageColorspace(image,RGBColorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(MagickRealType *second_derivative,
% const MagickRealType smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of MagickRealTypes representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
/*
  Mark the zero crossings of a second-derivative histogram in crossings[]:
  1 for negative-to-positive, -1 for positive-to-negative, 0 elsewhere.
  Values within +/- smooth_threshold are squashed to zero first to
  suppress noise (second_derivative is modified in place).
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
  {
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  }
  /*
    Walk the bins tracking the sign of the last nonzero sample; a sign
    change marks a crossing.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] > 0.0)
      {
        if (parity < 0)
          crossings[i]=1;
        parity=(-1);
        continue;
      }
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
  }
}
|
fc_hcl_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "fc_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif
/* Per-node runtime data cached for the fully-connected (innerproduct)
   operator; shape fields are filled in by prerun(). */
struct fc_data
{
    int need_trans; /* nonzero when weight dims[0] != out_number, i.e. the
                       weight matrix needs a transpose (set in prerun) */
    int batch;      // N
    int out_number; // OUT
    int hidden;     // hidden
    int zero[3];    // input, kernel, output
    float scale[3]; // input, kernel, output
};
/*
 * Naive fully-connected (inner product) kernel:
 *   output[n][p] = bias[p] + dot(weight[p], input[n]) over inc*inh*inw terms
 *
 * inn: batch size; inc/inh/inw: input channels/height/width; outc: number
 * of output neurons; _bias may be NULL (treated as zero).  Always returns 0.
 *
 * Fix: the vector path is now guarded by __SSE2__ to match the
 * <emmintrin.h> include guard at the top of the file -- the original
 * tested __SSE__, which could select the intrinsics without the header
 * having been included.  The unused `elemsize` local was removed.
 */
static int innerproduct(int inn, int inc, int inh, int inw, int outc, const float* weight, const float* input, float* output,
                        const float* _bias, int num_thread, int cpu_affinity)
{
    int size = inw * inh;
    for (int n = 0; n < inn; n++)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int p = 0; p < outc; p++)
        {
            int q = 0;
            float sum = _bias ? _bias[p] : 0.f;
            const float* weight1 = weight + p * inc * size;
            const float* input1 = input + n * inc * size;
#if __SSE2__
            /* Process 4 floats per iteration; horizontal add at the end. */
            float _sum[4] = {0.f};
            __m128 _sum0 = _mm_set1_ps(0.f);
            for (; q + 3 < inc * size; q += 4)
            {
                __m128 _input = _mm_loadu_ps(input1 + q);
                __m128 _weight = _mm_loadu_ps(weight1 + q);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_input, _weight));
            }
            _mm_storeu_ps(_sum, _sum0);
            sum += _sum[0] + _sum[1] + _sum[2] + _sum[3];
#endif
            /* Scalar tail (or the whole loop when SSE2 is unavailable). */
            for (; q < inc * size; q++)
                sum += input1[q] * weight1[q];
            output[n * outc + p] = sum;
        }
    }
    return 0;
}
/*
 * Allocate and zero the per-node fc_data block.  Returns 0 on success,
 * -1 when the allocation fails (the original passed a NULL pointer to
 * memset on OOM).
 */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct fc_data* op_param = (struct fc_data*)sys_malloc(sizeof(struct fc_data));
    if (op_param == NULL)
        return -1;
    memset(op_param, 0, sizeof(struct fc_data));
    exec_node->ops_priv = op_param;
    return 0;
}
/* Free the fc_data block allocated by init_node.  Always returns 0. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct fc_data* op_param = (struct fc_data*)exec_node->ops_priv;
    sys_free(op_param);
    return 0;
}
/*
 * Cache shape information (batch, hidden size, output count) in fc_data
 * and decide whether the weight matrix needs a transpose.
 */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct fc_param* param = (struct fc_param*)ir_node->op.param_mem;
    struct fc_data* op_param = (struct fc_data*)exec_node->ops_priv;
    int hidden;

    if (ir_graph->graph_layout == TENGINE_LAYOUT_NCHW)
    {
        /* hidden = product of dims[1..min(dim_num,4)-1] */
        hidden = input_tensor->dims[1];
        for (int d = 2; d < input_tensor->dim_num && d <= 3; d++)
            hidden *= input_tensor->dims[d];
    }
    else
    {
        switch (input_tensor->dim_num)
        {
        case 2:
            hidden = input_tensor->dims[1];
            break;
        case 3:
            hidden = input_tensor->dims[1] * input_tensor->dims[2];
            break;
        case 4:
            hidden = input_tensor->dims[1] * input_tensor->dims[2] * input_tensor->dims[3];
            break;
        default:
            hidden = 0;
            break;
        }
    }
    op_param->hidden = hidden;
    op_param->batch = input_tensor->dims[0];
    op_param->out_number = param->num_output;
    /* a weight stored as [out_number, hidden] needs no transpose */
    op_param->need_trans = (weight_tensor->dims[0] == op_param->out_number) ? 0 : 1;
    return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    /* Execute the fully-connected layer: resolve the tensors, then hand the
     * raw float buffers to the reference innerproduct kernel.
     * Returns 0 on success, -1 if the kernel reports failure.
     * Fixes: removed dead locals `param`/`op_param` (fetched but never used
     * here) and scoped `bias_tensor` into the branch that initializes it. */
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    int num_thread = exec_graph->num_thread;
    int cpu_affinity = exec_graph->cpu_affinity;

    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    const void* input_data = input_tensor->data;
    void* weight_data = weight_tensor->data;
    void* output_data = output_tensor->data;

    int batch_number = input_tensor->dims[0];
    int inc = input_tensor->dims[1];
    /* absent spatial dims are treated as size 1 */
    int inh = input_tensor->dims[2] ? input_tensor->dims[2] : 1;
    int inw = input_tensor->dims[3] ? input_tensor->dims[3] : 1;
    int outc = output_tensor->dims[1];

    /* optional third input tensor is the bias */
    void* bias_data = NULL;
    if (ir_node->input_num > 2)
    {
        struct tensor* bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
        bias_data = bias_tensor->data;
    }

    if (innerproduct(batch_number, inc, inh, inw, outc, (float*)weight_data, (float*)input_data,
                     (float*)output_data, (float*)bias_data, num_thread, cpu_affinity)
        < 0)
        return -1;
    return 0;
}
/* Infer the FC output shape from the input rank and the graph layout.
 * Output keeps the input rank: batch dim m, channel dim n (= weight rows),
 * with any extra spatial dims collapsed to 1 (placed per NCHW/NHWC).
 * Returns 0 on success, -1 on unsupported rank or a hidden-size mismatch
 * between input and weight. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct tensor* weight = get_ir_graph_tensor(graph, node->input_tensors[1]);
    struct tensor* output = get_ir_graph_tensor(graph, node->output_tensors[0]);
    int dim[4];
    int n = weight->dims[0];  /* number of output features */
    int k = weight->dims[1];  /* hidden size expected by the weight */
    int m = input->dims[0];   /* batch size */
    int input_k = input->dims[1]; /* hidden size provided by the input */
    if (input->dim_num == 2)
    {
        dim[0] = m;
        dim[1] = n;
    }
    else if (input->dim_num == 3)
    {
        /* fold the trailing spatial dim into the hidden size */
        if (input->dims[2] != 0)
            input_k *= input->dims[2];
        if (graph->graph_layout == TENGINE_LAYOUT_NHWC)
        {
            dim[0] = m;
            dim[1] = 1;
            dim[2] = n;
        }
        else
        {
            dim[0] = m;
            dim[1] = n;
            dim[2] = 1;
        }
    }
    else if (input->dim_num == 4)
    {
        /* fold both spatial dims into the hidden size */
        if (input->dims[2] * input->dims[3] != 0)
            input_k *= input->dims[2] * input->dims[3];
        if (graph->graph_layout == TENGINE_LAYOUT_NHWC)
        {
            dim[0] = m;
            dim[1] = 1;
            dim[2] = 1;
            dim[3] = n;
        }
        else
        {
            dim[0] = m;
            dim[1] = n;
            dim[2] = 1;
            dim[3] = 1;
        }
    }
    else
        return -1; /* ranks other than 2..4 are not supported */
    if (k != input_k)
    {
        TLOG_ERR("fc: input tensor and weight tensor shape does not match, hidden_number: %d\n", k);
        return -1;
    }
    int ret = set_ir_tensor_shape(output, dim, input->dim_num);
    return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    /* Rate this implementation for the given node: best score for FP32
       input, decline (0) anything else.  todo: support uint8. */
    struct graph* graph = exec_node->graph;
    struct tensor* input = get_ir_graph_tensor(graph, exec_node->input_tensors[0]);
    if (input->data_type == TENGINE_DT_FP32)
        return OPS_SCORE_BEST;
    return 0;
}
/* Dispatch table binding this FC implementation into the executor.
   postrun is NULL: prerun only fills fc_data fields and allocates nothing. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = reshape,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
int register_fc_hcl_x86_op()
{
return register_builtin_node_ops(OP_FC, &hcl_node_ops);
}
int unregister_fc_hcl_x86_op()
{
return unregister_builtin_node_ops(OP_FC, &hcl_node_ops);
}
|
SpatialMaxUnpooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialMaxUnpooling.c"
#else
/* Scatter one sample's input planes into the (caller-zeroed) output planes
 * using the max indices recorded by the pooling forward pass.
 * Out-of-range indices are only recorded inside the parallel region and
 * reported once afterwards, because raising THError from inside an OpenMP
 * worker is not safe.
 * NOTE(review): if several slices fail concurrently, error_index ends up
 * holding an arbitrary one of the bad indices — benign, the call aborts
 * either way. */
static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(scalar_t *input_p, scalar_t *output_p,
                                                      THIndex_t *ind_p,
                                                      int nslices,
                                                      int iwidth, int iheight,
                                                      int owidth, int oheight)
{
  int k;
  int has_error = 0;
  THIndex_t error_index = 0;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    /* per-slice base pointers */
    scalar_t *output_p_k = output_p + k*owidth*oheight;
    scalar_t *input_p_k = input_p + k*iwidth*iheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;
    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE; /* retrieve position of max */
        if(maxp<0 || maxp>=owidth*oheight){
#pragma omp critical
          {
            has_error = 1;
            error_index = maxp;
          }
        } else {
          output_p_k[maxp] = input_p_k[i*iwidth + j]; /* update output */
        }
      }
    }
  }
  if (has_error) {
    THError("found an invalid max index %ld (output volumes are of size %dx%d)",
        error_index, oheight, owidth);
  }
}
/* Forward pass of spatial max-unpooling.
 * input: non-empty 3D (C,H,W) or 4D (N,C,H,W) tensor; indices must match
 * input's shape.  output is resized to (N,)C x oheight x owidth, zeroed,
 * then filled by scattering input values to the recorded max positions. */
void THNN_(SpatialMaxUnpooling_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  int dimw = 2; /* width/height dims for 3D input; bumped by one in batch mode */
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *input_data;
  scalar_t *output_data;
  THIndex_t *indices_data;

  AT_CHECK(!input->is_empty() && (input->dim() == 3 || input->dim() == 4),
       "non-empty 3D or 4D (batch mode) tensor expected for input, but got sizes: ", input->sizes());
  THNN_CHECK_SHAPE_INDICES(input, indices);

  if (input->dim() == 4)
  {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* get contiguous input and indices */
  input = THTensor_(newContiguous)(input);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize output, zero it, then scatter each frame */
  if (input->dim() == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data,
                                                  indices_data,
                                                  nslices,
                                                  iwidth, iheight,
                                                  owidth, oheight);
  }
  else
  {
    int p;

    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    THTensor_(zero)(output);

    input_data = input->data<scalar_t>();
    output_data = output->data<scalar_t>();
    indices_data = THIndexTensor_(data)(indices);

    /* one frame per batch element */
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateOutput_frame)(
        input_data+p*nslices*iwidth*iheight,
        output_data+p*nslices*owidth*oheight,
        indices_data+p*nslices*iwidth*iheight,
        nslices,
        iwidth, iheight,
        owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copies created above */
  c10::raw::intrusive_ptr::decref(input);
  THIndexTensor_(free)(indices);
}
/* Gather gradients for one sample: gradInput[i,j] = gradOutput[max index].
 * Fix: the original raised THError from inside the `#pragma omp parallel for`
 * region — branching/throwing out of a parallel region is undefined behavior
 * under OpenMP.  Errors are now collected under a critical section and
 * reported once after the loop, mirroring updateOutput_frame above.
 * On an invalid index the corresponding gradInput element is left untouched
 * (the whole call aborts via THError anyway). */
static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(scalar_t *gradInput_p, scalar_t *gradOutput_p,
                                                      THIndex_t *ind_p,
                                                      int nslices,
                                                      int iwidth, int iheight,
                                                      int owidth, int oheight)
{
  int k;
  int has_error = 0;
  THIndex_t error_index = 0;
#pragma omp parallel for private(k)
  for (k = 0; k < nslices; k++)
  {
    scalar_t *gradInput_p_k = gradInput_p + k*iwidth*iheight;
    scalar_t *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
    THIndex_t *ind_p_k = ind_p + k*iwidth*iheight;

    int i, j;
    THIndex_t maxp;
    for(i = 0; i < iheight; i++)
    {
      for(j = 0; j < iwidth; j++)
      {
        maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE; /* retrieve position of max */
        if(maxp < 0 || maxp >= owidth * oheight) {
#pragma omp critical
          {
            has_error = 1;
            error_index = maxp;
          }
        } else {
          gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */
        }
      }
    }
  }
  if (has_error) {
    THError("invalid max index %ld, owidth= %d, oheight= %d", error_index, owidth, oheight);
  }
}
/* Backward pass of spatial max-unpooling: routes each gradOutput value back
 * to the input position whose index produced it.  gradInput is resized to
 * input's shape and zeroed first; gradOutput's spatial size must match the
 * owidth/oheight arguments. */
void THNN_(SpatialMaxUnpooling_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THIndexTensor *indices,
    int owidth, int oheight)
{
  int dimw = 2; /* bumped by one in batch mode, as in updateOutput */
  int dimh = 1;
  int nbatch = 1;
  int nslices;
  int iheight;
  int iwidth;
  scalar_t *gradInput_data;
  scalar_t *gradOutput_data;
  THIndex_t *indices_data;

  THNN_CHECK_SHAPE_INDICES(input, indices);

  /* get contiguous gradOutput and indices */
  gradOutput = THTensor_(newContiguous)(gradOutput);
  indices = THIndexTensor_(newContiguous)(indices);

  /* resize */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  if (input->dim() == 4) {
    nbatch = input->size(0);
    dimw++;
    dimh++;
  }

  /* sizes */
  nslices = input->size(dimh-1);
  iheight = input->size(dimh);
  iwidth = input->size(dimw);

  /* gradOutput's spatial extent must agree with the forward output size */
  if(owidth!=gradOutput->size(dimw) || oheight!=gradOutput->size(dimh)){
    THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d",
            oheight, owidth, gradOutput->size(dimh), gradOutput->size(dimw));
  }

  /* get raw pointers */
  gradInput_data = gradInput->data<scalar_t>();
  gradOutput_data = gradOutput->data<scalar_t>();
  indices_data = THIndexTensor_(data)(indices);

  /* backprop */
  if (input->dim() == 3)
  {
    THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
                                                     indices_data,
                                                     nslices,
                                                     iwidth, iheight,
                                                     owidth, oheight);
  }
  else
  {
    int p;
    /* one frame per batch element */
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight,
                                                       indices_data+p*nslices*iwidth*iheight,
                                                       nslices,
                                                       iwidth, iheight,
                                                       owidth, oheight);
    }
  }

  /* cleanup: release the contiguous copies created above */
  c10::raw::intrusive_ptr::decref(gradOutput);
  THIndexTensor_(free)(indices);
}
#endif
|
homomorphic_functions.c | long long int sk[NUM_PRIME][4096];
long long int pk0[NUM_PRIME][4096], pk1[NUM_PRIME][4096];
long long int rlk00[NUM_PRIME][4096], rlk01[NUM_PRIME][4096], rlk10[NUM_PRIME][4096], rlk11[NUM_PRIME][4096];
long long int rlk20[NUM_PRIME][4096], rlk21[NUM_PRIME][4096], rlk30[NUM_PRIME][4096], rlk31[NUM_PRIME][4096], rlk40[NUM_PRIME][4096], rlk41[NUM_PRIME][4096];
//mpz_t quotient[THREADS], rem[THREADS];
//mpz_t temp_array64[THREADS];
//mpz_t chunk[THREADS];
//mpz_t temp_array512[THREADS];
/* Load one 4096-coefficient polynomial from `path` (one big integer per
 * coefficient) and reduce it modulo every RNS prime into `shares`.
 * Returns 0 on success, -1 if the file cannot be opened (shares untouched).
 * Fix: the original never checked fopen() and crashed inside gmp_fscanf
 * when a key file was missing; the per-file read/reduce boilerplate,
 * repeated seven times, is now a single helper. */
static int load_key_poly(const char *path, mpz_t big_array[], long long int shares[][4096])
{
    int i;
    FILE *fp = fopen(path, "r");
    if (fp == NULL)
    {
        fprintf(stderr, "read_keys: cannot open %s\n", path);
        return -1;
    }
    for (i = 0; i < 4096; i++)
        gmp_fscanf(fp, "%Zd", big_array[i]);
    fclose(fp);
    for (i = 0; i < NUM_PRIME; i++)
        compute_mod(big_array, shares[i], i);
    return 0;
}

/* Read the public, secret and relinearisation keys from keys_sage_gen/,
 * reduce them into RNS shares, and transform every share to the NTT
 * domain so later operations can multiply coefficient-wise. */
void read_keys()
{
    int i;
    /* scratch buffer for the multi-precision coefficients of one polynomial */
    static mpz_t big_array[4096];
    mpz_array_init(big_array[0], 4096, 256);

    /* public and secret key */
    load_key_poly("keys_sage_gen/pk0", big_array, pk0);
    load_key_poly("keys_sage_gen/pk1", big_array, pk1);
    load_key_poly("keys_sage_gen/sk", big_array, sk);

    /* relinearisation key, decomposition words 0 and 1 */
    load_key_poly("keys_sage_gen/rlk0_0", big_array, rlk00);
    load_key_poly("keys_sage_gen/rlk0_1", big_array, rlk01);
    load_key_poly("keys_sage_gen/rlk1_0", big_array, rlk10);
    load_key_poly("keys_sage_gen/rlk1_1", big_array, rlk11);

    /* Forward NTT of every key share.  rlk2x..rlk4x are transformed as in
       the original code even though nothing in this file loads them (they
       remain all-zero arrays). */
    for (i = 0; i < NUM_PRIME; i++)
    {
        fwd_ntt_q(pk0[i], i);
        fwd_ntt_q(pk1[i], i);
        fwd_ntt_q(sk[i], i);
        fwd_ntt_q(rlk00[i], i);
        fwd_ntt_q(rlk01[i], i);
        fwd_ntt_q(rlk10[i], i);
        fwd_ntt_q(rlk11[i], i);
        fwd_ntt_q(rlk20[i], i);
        fwd_ntt_q(rlk21[i], i);
        fwd_ntt_q(rlk30[i], i);
        fwd_ntt_q(rlk31[i], i);
        fwd_ntt_q(rlk40[i], i);
        fwd_ntt_q(rlk41[i], i);
    }
}
/* Refresh a ciphertext in place: decrypt to a plaintext polynomial, then
   re-encrypt it under the same public key. */
void FV_recrypt(long long int c0[][4096], long long int c1[][4096])
{
    int plaintext[4096];
    FV_dec_q(plaintext, c0, c1);
    FV_enc_q(plaintext, c0, c1);
}
/* FV encryption in the RNS domain.
 * m:      plaintext coefficients (length 4096).
 * c0/c1:  output ciphertext shares, one row per RNS prime.
 * Computes c0 = pk0*u + e1 + pby_t*m and c1 = pk1*u + e2, where u is a
 * uniform ternary polynomial and e1,e2 come from the Knuth-Yao sampler.
 * (pby_t[i] is presumably round(q/t) mod p[i] — named "p by t"; defined
 * elsewhere, confirm against key generation.)
 * Fixes: removed unused local `primrt`; corrected the comment on the
 * pk1*u multiply, which was a copy-paste of the e1+m comment. */
void FV_enc_q(int m[], long long int c0[][4096], long long int c1[][4096])
{
    int i, j, r;
    long long int m_encoded[4096], e1[4096], e2[4096], u[4096], u_copy[4096], pk0_mul_u[4096], pk1_mul_u[4096], e1_plus_m_encoded[4096];

    /* error polynomials from the discrete Gaussian sampler */
    knuth_yao(e1);
    knuth_yao(e2);

    /* u: uniform ternary polynomial with coefficients in {-1, 0, 1} */
    for (i = 0; i < 4096; i++)
    {
        r = rand() % 2;
        if (rand() % 2 == 1)
            r = -r;
        u[i] = r;
    }

    for (i = 0; i < NUM_PRIME; i++)
    {
        /* scale the message by pby_t modulo this prime */
        for (j = 0; j < 4096; j++)
            m_encoded[j] = m[j] * pby_t[i];
        poly_copy(u, u_copy);
        fwd_ntt_q(u_copy, i);
        coefficient_mul_q(pk0[i], u_copy, pk0_mul_u, i); /* pk0*u (NTT domain) */
        coefficient_mul_q(pk1[i], u_copy, pk1_mul_u, i); /* pk1*u (NTT domain) */
        inv_ntt_q(pk0_mul_u, i);                         /* pk0_mul_u <-- pk0*u */
        inv_ntt_q(pk1_mul_u, i);                         /* pk1_mul_u <-- pk1*u */
        coefficient_add_q(e1, m_encoded, e1_plus_m_encoded, i);    /* e1_plus_m_encoded <-- m_encoded + e1 */
        coefficient_add_q(pk0_mul_u, e1_plus_m_encoded, c0[i], i); /* c0[i] <-- pk0*u + e1 + m_encoded */
        coefficient_add_q(pk1_mul_u, e2, c1[i], i);                /* c1[i] <-- pk1*u + e2 */
    }
}
/*
void create_crt_rom(mpz_t q[], int length)
{
int i, j;
mpz_t q_full, Ni, Ni_inv, temp;
mpz_init(q_full);
mpz_init(Ni);
mpz_init(Ni_inv);
mpz_init(temp);
mpz_t mask;
mpz_init(mask);
mpz_set_str(q_full, "1", 10);
mpz_set_str(mask, "262143", 10);
for(i=0; i<length; i++)
mpz_mul(q_full, q_full, q[i]);
for(j=0; j<length; j++)
{
mpz_fdiv_q(Ni, q_full, q[j]);
mpz_invert(Ni_inv, Ni, q[j]);
gmp_printf("mux8_18bits rom(18'd%Zd, ", Ni_inv);
for(i=0; i<length-1; i++)
{
mpz_and(temp, Ni, mask);
if(i<length-2)
gmp_printf("18'd%Zd, ", temp);
else
gmp_printf("18'd%Zd, 18'd0, 18'd0, address, dataout);\n\n", temp);
mpz_sub(Ni, Ni, temp);
mpz_fdiv_q_2exp(Ni, Ni, 18);
}
}
}
*/
/* CRT reconstruction over the 7-prime base: combines the residue rows
 * c0[j][i] into full-precision coefficients c0_full[i], each reduced mod
 * p_full_length7 (classic sum of c*Ni*Ni_inv terms).
 * Fixes: `temp` is now cleared (one mpz leaked per call before); unused
 * `thread_num` removed. */
void inverse_crt_length7(long long int c0[][4096], mpz_t c0_full[])
{
    int i, j;
    mpz_t temp;
    mpz_init(temp);
    for (i = 0; i < 4096; i++)
    {
        for (j = 0; j < NUM_PRIME; j++)
        {
            mpz_mul_ui(temp, Ni_length7[j], c0[j][i]);
            mpz_mul(temp, temp, Ni_inv_length7[j]);
            mpz_mod(temp, temp, p_full_length7); /* temp = c0[j][i]*Ni*Ni_inv mod q_full */
            if (j == 0)
                mpz_set(c0_full[i], temp);
            else
                mpz_add(c0_full[i], c0_full[i], temp);
        }
        mpz_mod(c0_full[i], c0_full[i], p_full_length7);
    }
    mpz_clear(temp); /* was missing */
}
/* CRT reconstruction over the extended (NUM_PRIME_EXT) base; see
 * inverse_crt_length7 for the structure.  Result reduced mod
 * p_full_length15.
 * Fixes: `temp` is now cleared (leak per call); unused `thread_num`
 * removed. */
void inverse_crt_length15(long long int c0[][4096], mpz_t c0_full[])
{
    int i, j;
    mpz_t temp;
    mpz_init(temp);
    for (i = 0; i < 4096; i++)
    {
        for (j = 0; j < NUM_PRIME_EXT; j++)
        {
            mpz_mul_ui(temp, Ni_length15[j], c0[j][i]);
            mpz_mul(temp, temp, Ni_inv_length15[j]);
            mpz_mod(temp, temp, p_full_length15); /* temp = c0[j][i]*Ni*Ni_inv mod q_full */
            if (j == 0)
                mpz_set(c0_full[i], temp);
            else
                mpz_add(c0_full[i], c0_full[i], temp);
        }
        mpz_mod(c0_full[i], c0_full[i], p_full_length15);
    }
    mpz_clear(temp); /* was missing */
}
/* In-place round-to-nearest scaling: a[i] <- round(a[i]*t / p_full_length7).
 * Negative values are handled by rounding the magnitude and negating,
 * because mpz_fdiv_qr floors.
 * Fixes: the function is declared int but had no return statement (UB if a
 * caller used the value) — now returns 0; quotient/rem are cleared (leak
 * per call); unused `thread_num` removed. */
int round_tx(mpz_t a[]) // computes round(t*c/q)
{
    int i;
    mpz_t quotient, rem;
    mpz_init(quotient);
    mpz_init(rem);
    for (i = 4095; i >= 0; i--)
    {
        mpz_mul_ui(a[i], a[i], t); /* a[i] <-- a[i]*t */
        if (mpz_cmp_ui(a[i], 0) < 0) /* a[i] is -ve: round |a[i]|, then negate */
        {
            mpz_ui_sub(a[i], 0, a[i]);
            mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
            if (mpz_cmp(rem, p_full_length7_by2) > 0)
                mpz_add_ui(quotient, quotient, 1); /* round up */
            mpz_ui_sub(a[i], 0, quotient);
        }
        else
        {
            mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
            if (mpz_cmp(rem, p_full_length7_by2) > 0)
                mpz_add_ui(quotient, quotient, 1); /* round up */
            mpz_set(a[i], quotient);
        }
    }
    mpz_clear(quotient); /* was missing */
    mpz_clear(rem);
    return 0;
}
/* As round_tx, but additionally reduces each rounded coefficient modulo
 * p_full_length7: a[i] <- round(a[i]*t / q) mod q.
 * Fixes: missing return in an int function (now returns 0); quotient/rem
 * cleared (leak per call); unused `thread_num` removed. */
int round_tx_mod(mpz_t a[]) // computes mod( round(t*c/q), q )
{
    int i;
    mpz_t quotient, rem;
    mpz_init(quotient);
    mpz_init(rem);
    for (i = 0; i < 4096; i++)
    {
        mpz_mul_ui(a[i], a[i], t);
        if (mpz_cmp_ui(a[i], 0) < 0) /* a[i] is -ve: round |a[i]|, negate, reduce */
        {
            mpz_ui_sub(a[i], 0, a[i]);
            mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
            if (mpz_cmp(rem, p_full_length7_by2) > 0)
                mpz_add_ui(quotient, quotient, 1);
            mpz_ui_sub(a[i], 0, quotient);
            mpz_mod(a[i], a[i], p_full_length7);
        }
        else
        {
            mpz_fdiv_qr(quotient, rem, a[i], p_full_length7);
            if (mpz_cmp(rem, p_full_length7_by2) > 0)
                mpz_add_ui(quotient, quotient, 1);
            mpz_set(a[i], quotient);
            mpz_mod(a[i], a[i], p_full_length7);
        }
    }
    mpz_clear(quotient); /* was missing */
    mpz_clear(rem);
    return 0;
}
/* FV decryption in the RNS domain: m = round(t*(c0 + sk*c1)/q) mod t.
 * NOTE(review): fwd_ntt_q transforms the caller's c1 rows in place, so c1
 * is consumed by this call — confirm callers rely on that (FV_recrypt
 * re-encrypts immediately, so it does not care).
 * Fix: `temp` is now cleared (one mpz leaked per call before). */
void FV_dec_q(int m[], long long int c0[][4096], long long int c1[][4096])
{
    int i;
    long long int sk_mul_c1[NUM_PRIME][4096];
    mpz_t c1_full[4096];
    mpz_t temp;
    mpz_array_init(c1_full[0], 4096, 512);
    mpz_init(temp);
    for (i = 0; i < NUM_PRIME; i++)
    {
        fwd_ntt_q(c1[i], i); /* in-place NTT of the caller's c1 */
        coefficient_mul_q(sk[i], c1[i], sk_mul_c1[i], i);
        inv_ntt_q(sk_mul_c1[i], i);
        coefficient_add_q(c0[i], sk_mul_c1[i], sk_mul_c1[i], i); /* sk_mul_c1 <-- c0 + sk*c1 */
    }
    inverse_crt_length7(sk_mul_c1, c1_full); /* back to full precision */
    centerlift(c1_full);
    round_tx(c1_full); /* round t*c/q */
    for (i = 4095; i >= 0; i--)
    {
        mpz_mod_ui(temp, c1_full[i], t); /* temp = c1_full[i] % t */
        m[i] = mpz_get_ui(temp);
    }
    mpz_clear(temp); /* was missing */
    mpz_clear(c1_full[0]);
}
/* Homomorphic addition: (c0, c1) = (c10 + c20, c11 + c21), component-wise
 * per RNS prime.
 * Fix: declared int but fell off the end without returning (UB if the
 * caller used the value) — now returns 0. */
int FV_add(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096])
{
    int i;
    for (i = 0; i < NUM_PRIME; i++)
    {
        poly_add_q(c10[i], c20[i], c0[i], i);
        poly_add_q(c11[i], c21[i], c1[i], i);
    }
    return 0;
}
/* Homomorphic subtraction: (c0, c1) = (c10 - c20, c11 - c21), component-wise
 * per RNS prime.
 * Fix: missing return in an int function — now returns 0. */
int FV_sub(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096])
{
    int i;
    for (i = 0; i < NUM_PRIME; i++)
    {
        poly_sub_q(c10[i], c20[i], c0[i], i);
        poly_sub_q(c11[i], c21[i], c1[i], i);
    }
    return 0;
}
/* Homomorphic multiplication: (c10,c11)*(c20,c21) -> (c0,c1).
 * Pipeline: lift both ciphertexts to full precision (inverse CRT +
 * centerlift), re-share them over the extended prime base QL, compute the
 * three tensor products c0=c10*c20, c1=c10*c21+c11*c20, c2=c11*c21 in the
 * NTT domain, scale each by t/q with rounding, map c0/c1 back to the base
 * RNS shares and fold c2 into (c0,c1) via relinearisation.
 * Fixes: missing return in an int function (now returns 0); removed unused
 * locals `fp`, `primrt`, `num_thread`, `index`, `j` and the large blocks of
 * commented-out debug code.  Live computation is unchanged. */
int FV_mul(long long int c10[][4096], long long int c11[][4096], long long int c20[][4096], long long int c21[][4096], long long int c0[][4096], long long int c1[][4096])
{
    int i;
    long long int c10_QL[NUM_PRIME_EXT][4096], c11_QL[NUM_PRIME_EXT][4096], c20_QL[NUM_PRIME_EXT][4096], c21_QL[NUM_PRIME_EXT][4096], c2[NUM_PRIME_EXT][4096];
    long long int c10_mul_c20[NUM_PRIME_EXT][4096], c10_mul_c21[NUM_PRIME_EXT][4096], c11_mul_c20[NUM_PRIME_EXT][4096], c11_mul_c21[NUM_PRIME_EXT][4096];
    mpz_t c10_full[4096], c11_full[4096], c20_full[4096], c21_full[4096];
    mpz_t c0_full[4096], c1_full[4096], c2_full[4096];

    mpz_array_init(c10_full[0], 4096, 512);
    mpz_array_init(c11_full[0], 4096, 512);
    mpz_array_init(c20_full[0], 4096, 512);
    mpz_array_init(c21_full[0], 4096, 512);
    mpz_array_init(c0_full[0], 4096, 512);
    mpz_array_init(c1_full[0], 4096, 512);
    mpz_array_init(c2_full[0], 4096, 512);

    /* lift both ciphertexts to full precision and centerlift */
    inverse_crt_length7(c10, c10_full);
    inverse_crt_length7(c11, c11_full);
    inverse_crt_length7(c20, c20_full);
    inverse_crt_length7(c21, c21_full);
    centerlift(c10_full);
    centerlift(c11_full);
    centerlift(c20_full);
    centerlift(c21_full);

    /* re-share over the extended prime base QL */
    map_to_QL(c10_full, c10_QL);
    map_to_QL(c11_full, c11_QL);
    map_to_QL(c20_full, c20_QL);
    map_to_QL(c21_full, c21_QL);

    /* tensor products in the NTT domain, per extended prime */
    for (i = 0; i < NUM_PRIME_EXT; i++)
    {
        fwd_ntt_q(c10_QL[i], i);
        fwd_ntt_q(c11_QL[i], i);
        fwd_ntt_q(c20_QL[i], i);
        fwd_ntt_q(c21_QL[i], i);
        coefficient_mul_q(c10_QL[i], c20_QL[i], c10_mul_c20[i], i);
        coefficient_mul_q(c10_QL[i], c21_QL[i], c10_mul_c21[i], i);
        coefficient_mul_q(c11_QL[i], c20_QL[i], c11_mul_c20[i], i);
        coefficient_mul_q(c11_QL[i], c21_QL[i], c11_mul_c21[i], i);
        inv_ntt_q(c10_mul_c20[i], i); /* c0[i] = c10*c20 mod q[i] */
        poly_copy(c10_mul_c20[i], c0[i]);
        coefficient_add_q(c10_mul_c21[i], c11_mul_c20[i], c1[i], i);
        inv_ntt_q(c1[i], i);          /* c1[i] = c10*c21 + c11*c20 mod q[i] */
        inv_ntt_q(c11_mul_c21[i], i); /* c2[i] = c11*c21 mod q[i] */
        poly_copy(c11_mul_c21[i], c2[i]);
    }

    /* back to full precision, scale by t/q with rounding */
    inverse_crt_length15(c0, c0_full);
    inverse_crt_length15(c1, c1_full);
    inverse_crt_length15(c2, c2_full);
    centerlift_QL(c0_full);
    round_tx_mod(c0_full);
    centerlift_QL(c1_full);
    round_tx_mod(c1_full);
    centerlift_QL(c2_full);
    round_tx_mod(c2_full);
    centerlift(c2_full);

    /* re-share c0/c1 and fold the degree-2 term c2 back in */
    compute_shares(c0_full, c0);
    compute_shares(c1_full, c1);
    FV_relin(c0, c1, c2_full);

    mpz_clear(c10_full[0]);
    mpz_clear(c11_full[0]);
    mpz_clear(c20_full[0]);
    mpz_clear(c21_full[0]);
    mpz_clear(c0_full[0]);
    mpz_clear(c1_full[0]);
    mpz_clear(c2_full[0]);
    return 0;
}
/* Relinearisation: folds the degree-2 ciphertext component c2_full back
 * into (c0_shares, c1_shares) using relinearisation key words 0 and 1
 * (rlk00/rlk01 and rlk10/rlk11).  Only two decomposition words are used
 * because word_decomp currently produces cwd0 and cwd1 only (its j-loop
 * stops at 2); cwd2..cwd4 are passed but stay unused here.
 * Fixes: removed three completely unused share arrays (each
 * NUM_PRIME*4096*8 bytes of stack); missing return in an int function
 * (now returns 0). */
int FV_relin(long long int c0_shares[][4096], long long int c1_shares[][4096], mpz_t c2_full[])
{
    int i;
    mpz_t cwd0[4096], cwd1[4096], cwd2[4096], cwd3[4096], cwd4[4096];
    mpz_array_init(cwd0[0], 4096, 256);
    mpz_array_init(cwd1[0], 4096, 256);
    mpz_array_init(cwd2[0], 4096, 256);
    mpz_array_init(cwd3[0], 4096, 256);
    mpz_array_init(cwd4[0], 4096, 256);
    long long int rlk0_mul_cwd[NUM_PRIME][4096], rlk1_mul_cwd[NUM_PRIME][4096];
    long long int cwd0_shares[NUM_PRIME][4096], cwd1_shares[NUM_PRIME][4096];
    long long int temp[NUM_PRIME][4096];

    /* decompose c2 into words and re-share the two live words */
    word_decomp(c2_full, cwd0, cwd1, cwd2, cwd3, cwd4);
    compute_shares(cwd0, cwd0_shares);
    compute_shares(cwd1, cwd1_shares);

    for (i = 0; i < NUM_PRIME; i++)
    {
        fwd_ntt_q(cwd0_shares[i], i);
        coefficient_mul_q(rlk00[i], cwd0_shares[i], rlk0_mul_cwd[i], i);
        fwd_ntt_q(cwd1_shares[i], i);
        coefficient_mul_q(rlk10[i], cwd1_shares[i], temp[i], i);
        coefficient_add_q(rlk0_mul_cwd[i], temp[i], rlk0_mul_cwd[i], i); /* rlk00*cwd0 + rlk10*cwd1 */
        coefficient_mul_q(rlk01[i], cwd0_shares[i], rlk1_mul_cwd[i], i);
        coefficient_mul_q(rlk11[i], cwd1_shares[i], temp[i], i);
        coefficient_add_q(rlk1_mul_cwd[i], temp[i], rlk1_mul_cwd[i], i); /* rlk01*cwd0 + rlk11*cwd1 */
        inv_ntt_q(rlk0_mul_cwd[i], i);
        inv_ntt_q(rlk1_mul_cwd[i], i);
        coefficient_add_q(c0_shares[i], rlk0_mul_cwd[i], c0_shares[i], i); /* c0 += sum rlk_i0*cwd_i */
        coefficient_add_q(c1_shares[i], rlk1_mul_cwd[i], c1_shares[i], i); /* c1 += sum rlk_i1*cwd_i */
    }

    mpz_clear(cwd0[0]); mpz_clear(cwd1[0]); mpz_clear(cwd2[0]); mpz_clear(cwd3[0]); mpz_clear(cwd4[0]);
    return 0;
}
/* Decompose every coefficient of c[] into base-2^91 words, each reduced
 * mod p_full_length7.  The sign is extracted first and re-applied per word.
 * c[] is consumed (shifted right) in place.
 * NOTE(review): despite their names, mask = 2^91 - 1, two_to_32 = 2^91 and
 * two_to_31 = 2^90 here (the latter two are only referenced by the
 * commented-out carry code).  The word loop runs j < 2, so only cwd0 and
 * cwd1 receive data; the j==2..4 branches are dead — confirm intentional. */
int word_decomp(mpz_t c[], mpz_t cwd0[], mpz_t cwd1[], mpz_t cwd2[], mpz_t cwd3[], mpz_t cwd4[])
{
    int i, j;
    int sign;
    mpz_t mask; mpz_init(mask);
    mpz_set_str(mask, "2475880078570760549798248447", 10); /* 2^91 - 1 (comment fixed: not 2^32-1) */
    mpz_t two_to_32; mpz_init(two_to_32);
    mpz_set_str(two_to_32, "2475880078570760549798248448", 10); /* 2^91 */
    mpz_t two_to_31; mpz_init(two_to_31);
    mpz_set_str(two_to_31, "1237940039285380274899124224", 10); /* 2^90 */
    mpz_t chunk;
    mpz_init(chunk);
    int thread_num;
    //#pragma omp parallel for private(thread_num, sign, j)
    for(i=0; i<4096; i++)
    {
        //thread_num = omp_get_thread_num();
        sign=0;
        if(mpz_cmp_ui(c[i], 0)<0)
        {
            sign = 1; /* remember the sign, work on |c[i]| */
            mpz_ui_sub(c[i], 0, c[i]);
        }
        for(j=0; j<2; j++) /* only two words are produced */
        {
            mpz_and(chunk, c[i], mask); /* low 91 bits */
            mpz_sub(c[i], c[i], chunk);
            mpz_fdiv_q_2exp(c[i], c[i], 91); // c[i] = c[i]>>91
            /*
            if(mpz_cmp(chunk, two_to_31)>0) // if chunk > 2^31
            {
                mpz_sub(chunk, chunk, two_to_32); // chunk = chunk- 2^32
                mpz_add_ui(c[i], c[i], 1);
            }
            */
            if(sign) mpz_ui_sub(chunk, 0, chunk); // chunk = -chunk
            if(j==0) mpz_mod(cwd0[i], chunk, p_full_length7);
            if(j==1) mpz_mod(cwd1[i], chunk, p_full_length7);
            if(j==2) mpz_mod(cwd2[i], chunk, p_full_length7); /* unreachable: j < 2 */
            if(j==3) mpz_mod(cwd3[i], chunk, p_full_length7); /* unreachable */
            if(j==4) mpz_mod(cwd4[i], chunk, p_full_length7); /* unreachable */
        }
    }
}
/* Signed base-2^32 decomposition of every coefficient of c[] into five
 * words cwd0..cwd4, each reduced mod p_full_length7.  Words above 2^31 are
 * replaced by their negative representative with a carry into the next
 * word, keeping each word in (-2^31, 2^31].  The sign of the coefficient
 * is extracted first and re-applied per word.  c[] is consumed (shifted
 * right) in place. */
int word_decomp_32bit(mpz_t c[], mpz_t cwd0[], mpz_t cwd1[], mpz_t cwd2[], mpz_t cwd3[], mpz_t cwd4[])
{
    int i, j;
    int sign;
    mpz_t mask; mpz_init(mask);
    mpz_set_str(mask, "4294967295", 10); // mask=2^32-1
    mpz_t two_to_32; mpz_init(two_to_32);
    mpz_set_str(two_to_32, "4294967296", 10); /* 2^32 */
    mpz_t two_to_31; mpz_init(two_to_31);
    mpz_set_str(two_to_31, "2147483648", 10); /* 2^31 */
    mpz_t chunk;
    mpz_init(chunk);
    int thread_num;
    //#pragma omp parallel for private(thread_num, sign, j)
    for(i=0; i<4096; i++)
    {
        //thread_num = omp_get_thread_num();
        sign=0;
        if(mpz_cmp_ui(c[i], 0)<0)
        {
            sign = 1; /* remember the sign, work on |c[i]| */
            mpz_ui_sub(c[i], 0, c[i]);
        }
        for(j=0; j<5; j++)
        {
            mpz_and(chunk, c[i], mask); /* low 32 bits */
            mpz_sub(c[i], c[i], chunk);
            mpz_fdiv_q_2exp(c[i], c[i], 32); // c[i] = c[i]>>32
            if(mpz_cmp(chunk, two_to_31)>0) // if chunk > 2^31
            {
                mpz_sub(chunk, chunk, two_to_32); // chunk = chunk- 2^32
                mpz_add_ui(c[i], c[i], 1); /* carry into the next word */
            }
            if(sign) mpz_ui_sub(chunk, 0, chunk); // chunk = -chunk
            if(j==0) mpz_mod(cwd0[i], chunk, p_full_length7);
            if(j==1) mpz_mod(cwd1[i], chunk, p_full_length7);
            if(j==2) mpz_mod(cwd2[i], chunk, p_full_length7);
            if(j==3) mpz_mod(cwd3[i], chunk, p_full_length7);
            if(j==4) mpz_mod(cwd4[i], chunk, p_full_length7);
        }
    }
}
/* Reduce each full-precision coefficient a[i] modulo every RNS prime p[j],
 * writing the residue into a_shares[j][i].
 * Fixes: `temp` is now cleared (one mpz leaked per call before); unused
 * `thread_num` removed. */
void compute_shares(mpz_t a[], long long int a_shares[][4096])
{
    int i, j;
    mpz_t temp;
    mpz_init(temp);
    for (i = 0; i < 4096; i++)
    {
        for (j = 0; j < NUM_PRIME; j++)
        {
            mpz_mod_ui(temp, a[i], p[j]);
            a_shares[j][i] = mpz_get_ui(temp);
        }
    }
    mpz_clear(temp); /* was missing */
}
/* Reduce each full-precision coefficient a[i] modulo the single prime
 * p[prime_index], writing the residue into b[i].
 * Fix: `temp` is now cleared (one mpz leaked per call before). */
void compute_mod(mpz_t a[], long long int b[], int prime_index)
{
    int i;
    mpz_t temp;
    mpz_init(temp);
    for (i = 0; i < 4096; i++)
    {
        mpz_mod_ui(temp, a[i], p[prime_index]);
        b[i] = mpz_get_ui(temp);
    }
    mpz_clear(temp); /* was missing */
}
/* Map each coefficient from [0, q) to the centered range (-q/2, q/2] by
 * subtracting q = p_full_length7 from values above q/2.  In place.
 * Fix: missing return in an int function — now returns 0. */
int centerlift(mpz_t a[])
{
    int i;
    for (i = 0; i < 4096; i++)
    {
        if (mpz_cmp(a[i], p_full_length7_by2) > 0)
            mpz_sub(a[i], a[i], p_full_length7); /* a[i] = a[i]-q */
    }
    return 0;
}
/* Centerlift with respect to the extended modulus p_full_length15; see
 * centerlift().  In place.
 * Fix: missing return in an int function — now returns 0. */
int centerlift_QL(mpz_t a[])
{
    int i;
    for (i = 0; i < 4096; i++)
    {
        if (mpz_cmp(a[i], p_full_length15_by2) > 0)
            mpz_sub(a[i], a[i], p_full_length15); /* a[i] = a[i]-q */
    }
    return 0;
}
/* Re-share the full-precision polynomial a[] over the extended prime base:
 * first reduce each a[i] mod p_full_length15 (NOTE: this mutates a[] in
 * place), then write its residue mod each extended prime into b[j][i].
 * Fixes: `temp` is now cleared (leak per call); unused `thread_num`
 * removed; missing return in an int function — now returns 0. */
int map_to_QL(mpz_t a[], long long int b[][4096])
{
    int i, j;
    mpz_t temp;
    mpz_init(temp);
    for (i = 0; i < 4096; i++)
    {
        mpz_mod(a[i], a[i], p_full_length15);
        for (j = 0; j < NUM_PRIME_EXT; j++)
        {
            mpz_mod_ui(temp, a[i], p[j]);
            b[j][i] = mpz_get_ui(temp);
        }
    }
    mpz_clear(temp); /* was missing */
    return 0;
}
/* Coefficient-wise product modulo prime p[prime_index]:
   c[idx] = a[idx]*b[idx] mod q for all 4096 coefficients. */
void coefficient_mul_q(long long int a[], long long int b[], long long int c[], int prime_index)
{
    int idx;
    for (idx = 0; idx < 4096; idx++)
        c[idx] = mod(a[idx] * b[idx], prime_index);
}
/* Coefficient-wise sum modulo prime p[prime_index]:
   c[idx] = a[idx]+b[idx] mod q for all 4096 coefficients. */
void coefficient_add_q(long long int a[], long long int b[], long long int c[], int prime_index)
{
    int idx;
    for (idx = 0; idx < 4096; idx++)
        c[idx] = mod(a[idx] + b[idx], prime_index);
}
/* Generate a test plaintext: the all-zero polynomial except m[0], which is
 * set to a random bit.
 * Fix: removed unused locals `fm`, `r1`, `r2`. */
void message_gen(int m[])
{
    int i;
    for (i = 0; i < 4096; i++)
        m[i] = 0;
    m[0] = random() % 2;
}
/* Copy the 4096-coefficient polynomial a into b. */
void poly_copy(long long int a[], long long int b[])
{
    int idx;
    for (idx = 4095; idx >= 0; idx--)
        b[idx] = a[idx];
}
/*
void message_encrypt(int m, mpz_t c[])
{
int message[4096];
int i;
for(i=0; i<4096; i++)
message[i] = 0;
message[0] = m;
YASHE_enc(message, c);
}
*/
|
requantize_leakyrelu_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Fused requantization + LeakyReLU for pack4 int8 data (ARM NEON).
// Converts the int32 accumulator blob (bottom_blob) back to int8 (top_blob)
// by folding both quantization scales -- and the optional bias -- into a
// single multiply(-add) per element, then applying LeakyReLU and saturating
// narrowing inside float2int8leakyrelu().
// scale_in/scale_out/bias tables are either scalar (size 1, broadcast) or
// per-output-lane.  Only out_elempack 8 and 1 are handled here.
static void requantize_leakyrelu_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int channels = bottom_blob.c;
int size = w * h;
int outc = top_blob.c;
int out_elempack = top_blob.elempack;
int scale_in_data_size = scale_in_data.w;
int scale_out_data_size = scale_out_data.w;
int bias_data_size = bias_data.w;
// int8(leakyrelu(v * scale_in, slope) * scale_out)
// int8_leakyrelu(v * (scale_in * scale_out), slope)
// int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
// int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)
// pack4 -> pack8 repack: each pack8 output channel q interleaves the two
// consecutive pack4 input channels q*2 and q*2+1.
if (out_elempack == 8)
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
const int* intptr0 = bottom_blob.channel(q * 2);
const int* intptr1 = bottom_blob.channel(q * 2 + 1);
signed char* ptr = top_blob.channel(q);
// per-lane combined scale = scale_in * scale_out (folded once per channel)
float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
#if __aarch64__
// unrolled: 4 pixels (16 ints per input channel, 32 output bytes) per step
for (; i + 3 < size; i += 4)
{
float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
_v00 = vmulq_f32(_v00, _scale0);
_v01 = vmulq_f32(_v01, _scale0);
_v02 = vmulq_f32(_v02, _scale0);
_v03 = vmulq_f32(_v03, _scale0);
_v10 = vmulq_f32(_v10, _scale1);
_v11 = vmulq_f32(_v11, _scale1);
_v12 = vmulq_f32(_v12, _scale1);
_v13 = vmulq_f32(_v13, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
vst1_s8(ptr + 16, float2int8leakyrelu(_v02, _v12, _slope));
vst1_s8(ptr + 24, float2int8leakyrelu(_v03, _v13, _slope));
intptr0 += 16;
intptr1 += 16;
ptr += 32;
}
#endif // __aarch64__
// tail: one pixel (4 ints per input channel, 8 output bytes) per step
for (; i < size; i++)
{
float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
_v0 = vmulq_f32(_v0, _scale0);
_v1 = vmulq_f32(_v1, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
intptr0 += 4;
intptr1 += 4;
ptr += 8;
}
}
}
else
{
// bias present: use fused multiply-add with bias pre-scaled by scale_out
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < outc; q++)
{
const int* intptr0 = bottom_blob.channel(q * 2);
const int* intptr1 = bottom_blob.channel(q * 2 + 1);
signed char* ptr = top_blob.channel(q);
float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);
float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
_bias0 = vmulq_f32(_bias0, _scale_out0);
_bias1 = vmulq_f32(_bias1, _scale_out1);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
#if __aarch64__
// unrolled: 4 pixels per step (vfmaq_f32 is AArch64-only fused form)
for (; i + 3 < size; i += 4)
{
float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
float32x4_t _v02 = vcvtq_f32_s32(vld1q_s32(intptr0 + 8));
float32x4_t _v03 = vcvtq_f32_s32(vld1q_s32(intptr0 + 12));
float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
float32x4_t _v12 = vcvtq_f32_s32(vld1q_s32(intptr1 + 8));
float32x4_t _v13 = vcvtq_f32_s32(vld1q_s32(intptr1 + 12));
_v00 = vfmaq_f32(_bias0, _v00, _scale0);
_v01 = vfmaq_f32(_bias0, _v01, _scale0);
_v02 = vfmaq_f32(_bias0, _v02, _scale0);
_v03 = vfmaq_f32(_bias0, _v03, _scale0);
_v10 = vfmaq_f32(_bias1, _v10, _scale1);
_v11 = vfmaq_f32(_bias1, _v11, _scale1);
_v12 = vfmaq_f32(_bias1, _v12, _scale1);
_v13 = vfmaq_f32(_bias1, _v13, _scale1);
vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
vst1_s8(ptr + 16, float2int8leakyrelu(_v02, _v12, _slope));
vst1_s8(ptr + 24, float2int8leakyrelu(_v03, _v13, _slope));
intptr0 += 16;
intptr1 += 16;
ptr += 32;
}
#endif // __aarch64__
// 2-pixel step; vmlaq_f32 is the non-fused ARMv7 fallback
for (; i + 1 < size; i += 2)
{
float32x4_t _v00 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v01 = vcvtq_f32_s32(vld1q_s32(intptr0 + 4));
float32x4_t _v10 = vcvtq_f32_s32(vld1q_s32(intptr1));
float32x4_t _v11 = vcvtq_f32_s32(vld1q_s32(intptr1 + 4));
#if __aarch64__
_v00 = vfmaq_f32(_bias0, _v00, _scale0);
_v01 = vfmaq_f32(_bias0, _v01, _scale0);
_v10 = vfmaq_f32(_bias1, _v10, _scale1);
_v11 = vfmaq_f32(_bias1, _v11, _scale1);
#else  // __aarch64__
_v00 = vmlaq_f32(_bias0, _v00, _scale0);
_v01 = vmlaq_f32(_bias0, _v01, _scale0);
_v10 = vmlaq_f32(_bias1, _v10, _scale1);
_v11 = vmlaq_f32(_bias1, _v11, _scale1);
#endif // __aarch64__
vst1_s8(ptr, float2int8leakyrelu(_v00, _v10, _slope));
vst1_s8(ptr + 8, float2int8leakyrelu(_v01, _v11, _slope));
intptr0 += 8;
intptr1 += 8;
ptr += 16;
}
// tail: one pixel per step
for (; i < size; i++)
{
float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr0));
float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr1));
#if __aarch64__
_v0 = vfmaq_f32(_bias0, _v0, _scale0);
_v1 = vfmaq_f32(_bias1, _v1, _scale1);
#else  // __aarch64__
_v0 = vmlaq_f32(_bias0, _v0, _scale0);
_v1 = vmlaq_f32(_bias1, _v1, _scale1);
#endif // __aarch64__
vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
intptr0 += 4;
intptr1 += 4;
ptr += 8;
}
}
}
}
// pack4 -> pack1 unpack: each lane of pack4 input channel q scatters to the
// four consecutive pack1 output channels q*4 .. q*4+3.
if (out_elempack == 1)
{
if (bias_data_size == 0)
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
signed char* ptr0 = top_blob.channel(q * 4);
signed char* ptr1 = top_blob.channel(q * 4 + 1);
signed char* ptr2 = top_blob.channel(q * 4 + 2);
signed char* ptr3 = top_blob.channel(q * 4 + 3);
float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
for (; i < size; i++)
{
float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
_v = vmulq_f32(_v, _scale);
// duplicate _v into both halves; only lanes 0-3 are consumed below
int8x8_t v = float2int8leakyrelu(_v, _v, _slope);
ptr0[0] = vget_lane_s8(v, 0);
ptr1[0] = vget_lane_s8(v, 1);
ptr2[0] = vget_lane_s8(v, 2);
ptr3[0] = vget_lane_s8(v, 3);
intptr += 4;
ptr0 += 1;
ptr1 += 1;
ptr2 += 1;
ptr3 += 1;
}
}
}
else
{
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < channels; q++)
{
const int* intptr = bottom_blob.channel(q);
signed char* ptr0 = top_blob.channel(q * 4);
signed char* ptr1 = top_blob.channel(q * 4 + 1);
signed char* ptr2 = top_blob.channel(q * 4 + 2);
signed char* ptr3 = top_blob.channel(q * 4 + 3);
float32x4_t _scale_in = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 4);
float32x4_t _scale_out = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 4);
float32x4_t _bias = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 4);
float32x4_t _scale = vmulq_f32(_scale_in, _scale_out);
_bias = vmulq_f32(_bias, _scale_out);
float32x4_t _slope = vdupq_n_f32(slope);
int i = 0;
for (; i < size; i++)
{
float32x4_t _v = vcvtq_f32_s32(vld1q_s32(intptr));
#if __aarch64__
_v = vfmaq_f32(_bias, _v, _scale);
#else
_v = vmlaq_f32(_bias, _v, _scale);
#endif
int8x8_t v = float2int8leakyrelu(_v, _v, _slope);
ptr0[0] = vget_lane_s8(v, 0);
ptr1[0] = vget_lane_s8(v, 1);
ptr2[0] = vget_lane_s8(v, 2);
ptr3[0] = vget_lane_s8(v, 3);
intptr += 4;
ptr0 += 1;
ptr1 += 1;
ptr2 += 1;
ptr3 += 1;
}
}
}
}
}
|
kronecker.h | /**
* @file kronecker.h
* @author Nader KHAMMASSI - nader.khammassi@gmail.com
* @date
* @brief
*/
#include <iostream>
#include <vector>
#include <complex>
#include <cstring>
#include <core/linalg.h>
#ifdef USE_LIBDIVIDE
#include <libdivide.h>
#endif
//#define println(x) std::cout << x << std::endl
//#define print(x) std::cout << x
/**
* type definition
*/
//typedef std::complex<double> complex_t;
typedef std::vector<qx::linalg::complex_t> row_t;
typedef std::vector<row_t> matrix_t;
typedef std::vector<qx::linalg::complex_t> vector_t;
namespace qx
{
namespace linalg
{
/**
* kronecker operator interface
*/
/**
 * Interface for lazily evaluated Kronecker-product operands: an operand
 * exposes its dimension (size) and on-demand element access get(i,j)
 * without materializing the matrix.
 * Fix: added a virtual destructor -- the class is used polymorphically
 * (held by pointer in kronecker), and deleting a derived object through a
 * base pointer without one is undefined behavior.
 */
class kronecker_operator
{
public:
    virtual ~kronecker_operator() { }
    virtual complex_t get(size_t i, size_t j) const = 0;
    virtual size_t size() const = 0;
};
/**
* identity
*/
/**
 * Lazy n x n identity matrix: get(i,j) yields one on the diagonal and
 * zero everywhere else; nothing is stored beyond the dimension.
 */
class identity : public kronecker_operator
{
public:
    identity(size_t n) : n(n), zero(0.0), one(1.0)
    { }

    inline complex_t get(size_t i, size_t j) const
    {
        if (i == j)
            return one;
        return zero;
    }

    size_t size() const
    {
        return n;
    }

private:
    size_t n;
    const complex_t zero;
    const complex_t one;
};
/**
* unitary matrix
*/
/**
 * Adapter exposing an explicit n x n matrix through the kronecker_operator
 * interface.  NOTE(review): the constructor takes the matrix by reference
 * but the member stores a copy -- large operands are duplicated; confirm
 * this is intended.
 */
class unitary_matrix : public kronecker_operator
{
public:
    unitary_matrix(size_t n, matrix_t& m) : n(n), m(m)
    { }

    inline complex_t get(size_t i, size_t j) const
    {
        return m[i][j];
    }

    size_t size() const
    {
        return n;
    }

private:
    size_t n;
    matrix_t m;
};
/**
* kronecker
*/
/**
 * Lazy Kronecker product of two or three operands.  Elements are computed
 * on demand from the factors:
 *   two factors:   (A (x) B)(i,j)       = A(i/n2, j/n2) * B(i%n2, j%n2)
 *   three factors: (A (x) B (x) C)(i,j) = A(i/(n2*n3), j/(n2*n3))
 *                                         * B((i/n3)%n2, (j/n3)%n2)
 *                                         * C(i%n3, j%n3)
 * The operand pointers are borrowed, not owned.
 * Fix: removed the unused n1 locals (m1->size() was fetched in both
 * branches but never used) and a stale usleep debug comment.
 */
class kronecker
{
public:
    kronecker(kronecker_operator * m1,
              kronecker_operator * m2,
              kronecker_operator * m3=NULL) : m1(m1),
                                              m2(m2),
                                              m3(m3)
    {
    }

    inline complex_t get(size_t i, size_t j) const
    {
        if (!m3)
        {
            size_t n2 = m2->size();
            complex_t c1 = m1->get(i/n2, j/n2);
            complex_t c2 = m2->get(i%n2, j%n2);
            return (c1*c2);
        }
        else
        {
            size_t n2 = m2->size();
            size_t n3 = m3->size();
            complex_t c1 = m1->get(i/(n2*n3), j/(n2*n3));
            complex_t c2 = m2->get((i/n3)%n2, (j/n3)%n2);
            complex_t c3 = m3->get(i%n3, j%n3);
            return (c1*c2*c3);
        }
    }

private:
    kronecker_operator * m1;
    kronecker_operator * m2;
    kronecker_operator * m3;
};
/**
* const
*/
const static complex_t __c_zero__;
const static complex_t __c_one__ = 1.0f;
const static complex_t i_diag[] = { 0.0, 1.0 };
#if 0
/**
* kronecker
*/
class kronecker_ui
{
public:
kronecker_ui(cmatrix_t& m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
{
}
inline complex_t get(size_t i, size_t j)
{
return m(i%nm,j%nm);
/*
complex_t& c1 = m(i%nm,j%nm); // U
// usleep((i+1)*500+(j+i)*500); println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")");
return ((i/nm) == (j/nm) ? c1 : __c_zero__);
*/
}
inline void get(size_t i, size_t j, complex_t& c)
{
complex_t& c1 = m(i%nm,j%nm); // U
const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
c = c1*c2;
}
cmatrix_t m;
private:
size_t nm;
size_t ni;
};
/**
* kronecker
*/
class kronecker_iu
{
public:
kronecker_iu(cmatrix_t& m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
{
}
inline complex_t get(size_t i, size_t j)
{
return m(i/ni,j/ni);
/*
complex_t& c1 = m(i/ni,j/ni); // U
// usleep((i+1)*500+(j+i)*500); println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")");
return ((i%nm) == (j%nm) ? c1 : __c_zero__);
*/
}
inline void get(size_t i, size_t j, complex_t& c)
{
complex_t& c1 = m(i/nm,j/nm); // U
const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I
c = c1*c2;
}
cmatrix_t m;
private:
size_t nm;
size_t ni;
};
/**
* kronecker_iui
*/
class kronecker_iui
{
public:
kronecker_iui(cmatrix_t& m, size_t nm, size_t ni1, size_t ni2) : m(m), nm(nm), ni1(ni1), ni2(ni2)
{
}
inline complex_t get(size_t i, size_t j)
{
return m((i/ni2)%nm,(j/ni2)%nm);
/*
complex_t& c = m((i/ni2)%nm,(j/ni2)%nm); // U
bool i1 = (i/(nm*ni2)) == (j/(nm*ni2));
bool i2 = ((i%ni2) == (j%ni2));
return ((i1 && i2) ? c : __c_zero__);
*/
}
inline void get(size_t i, size_t j, complex_t& c)
{
complex_t& c1 = m(i%nm,j%nm); // U
const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
c = c1*c2;
}
cmatrix_t m;
private:
size_t nm;
size_t ni1;
size_t ni2;
};
#endif
#define __mod_2(x) (x & 1)
/**
* kronecker
*/
/**
 * Lazy Kronecker-factor access for a 2x2 gate U stored as a flat 4-element
 * row-major array: get() returns only the U factor U(i mod 2, j mod 2);
 * the identity factor's zero/one pattern is left to the caller (see the
 * commented-out full computation below).
 * NOTE(review): the 2x2 size is hard-coded via __mod_2, so this assumes
 * nm == 2; nm and ni are stored but unused in the live code path.
 */
class kronecker_ui
{
public:
kronecker_ui(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
{
}
inline complex_t get(size_t i, size_t j)
{
// return m(i%nm,j%nm);
// return m[(i%2)*2+j%2];
return m[__mod_2(i)*2+__mod_2(j)];
}
/*
inline void get(size_t i, size_t j, complex_t& c)
{
complex_t& c1 = m(i%nm,j%nm); // U
const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
c = c1*c2;
}
*/
const complex_t * m;
private:
size_t nm;
size_t ni;
};
/**
* kronecker
*/
#if 0
class kronecker_iu
{
public:
kronecker_iu(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni), fast_ni(ni)
{
}
inline complex_t get(uint64_t i, uint64_t j)
{
// return m(i/ni,j/ni);
// return m[(i/ni)*2+(j/ni)];
// return m[(i/fast_ni)*2+(j/fast_ni)];
return m[(2*i+j)/fast_ni];
}
/*
inline void get(size_t i, size_t j, complex_t& c)
{
complex_t& c1 = m(i/nm,j/nm); // U
const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I
c = c1*c2;
}
*/
const complex_t * m;
private:
uint64_t nm;
uint64_t ni;
libdivide::divider<uint64_t> fast_ni;
};
#endif
/**
* kronecker
*/
/**
 * Lazy Kronecker-factor access for a 2x2 gate U whose row/column index is
 * taken from the high part of the indices: get() returns U(i/ni, j/ni),
 * with U stored as a flat 4-element row-major array.  As with kronecker_ui,
 * only the U factor is returned; the identity zero pattern is handled by
 * the caller.
 * Fix: the previous version could not compile -- it declared two members
 * named 'm' (the const complex_t* from the constructor and a cmatrix_t)
 * and get() used call syntax m(i/ni,j/ni) on a pointer.  Rewritten to index
 * the flat array (matching the intent shown in the disabled libdivide
 * variant above: m[(i/ni)*2+(j/ni)]) and to drop the conflicting cmatrix_t
 * member together with the overload that depended on it.
 */
class kronecker_iu
{
public:
    kronecker_iu(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
    {
    }

    inline complex_t get(size_t i, size_t j)
    {
        // U entry selected by the high part of the indices; U is 2x2
        return m[(i/ni)*2+(j/ni)];
    }

    const complex_t * m;

private:
    size_t nm;
    size_t ni;
};
/**
* kronecker_iui
*/
/**
 * Lazy Kronecker-factor access for a 2x2 gate U sandwiched between two
 * identity blocks: the U row/column index is bit (i/ni2) mod 2, with U
 * stored as a flat 4-element row-major array.  Only the U factor is
 * returned; the identity zero pattern is handled by the caller (see the
 * commented-out full computation below).  When USE_LIBDIVIDE is defined,
 * the division by ni2 uses a precomputed libdivide divider for speed.
 * NOTE(review): nm and ni1 are stored but unused in the live code path;
 * the 2x2 size is hard-coded via __mod_2.
 */
class kronecker_iui
{
public:
kronecker_iui(const complex_t * m, size_t nm, size_t ni1, size_t ni2) : m(m), nm(nm), ni1(ni1), ni2(ni2)
#ifdef USE_LIBDIVIDE
, fast_ni2(ni2)
#endif
{
}
inline complex_t get(uint64_t i, uint64_t j)
{
// return m((i/ni2)%nm,(j/ni2)%nm);
// return m[((i/ni2)%2)*2+(j/ni2)%2];
// return m[__mod_2(i/ni2)*2+__mod_2((j/ni2))];
#ifdef USE_LIBDIVIDE
return m[__mod_2(i/fast_ni2)*2+__mod_2((j/fast_ni2))];
#else
return m[__mod_2(i/ni2)*2+__mod_2((j/ni2))];
#endif
}
/*
inline void get(size_t i, size_t j, complex_t& c)
{
complex_t& c1 = m(i%nm,j%nm); // U
const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
c = c1*c2;
}
*/
const complex_t * m;
private:
uint64_t nm;
uint64_t ni1;
uint64_t ni2;
#ifdef USE_LIBDIVIDE
libdivide::divider<uint64_t> fast_ni2;
#endif
};
/*
 * Debug helper: prints the real parts of a complex vector as "[ re, re, ... ]".
 * NOTE(review): relies on the print/println macros, which are commented out
 * at the top of this header -- presumably defined elsewhere; confirm.
 * Imaginary parts are intentionally not printed.
 */
void printv(cvector_t& v)
{
print("[ ");
for (int i=0; i<v.size(); ++i)
print(v[i].re << ", ");
//print(v[i].real() << ", ");
println(" ]");
}
/*
 * Dense matrix-vector product r = K v, where K is a lazily evaluated
 * Kronecker product.  Each row is computed independently (one per OpenMP
 * iteration), so the writes to r[i] do not race.
 * NOTE(review): 's' relies on complex_t's default constructor producing
 * zero (the mulmv_ variant assigns s = 0 explicitly) -- confirm.
 */
void mulmv(kronecker& k, cvector_t& v, cvector_t& r)
{
#pragma omp parallel for schedule(static)
for (int i=0; i<v.size(); i++)
{
complex_t s; // = 0;
for (int j=0; j<v.size(); j++)
s += v[j]*(k.get(i,j));
r[i] = s;
}
}
/**
* to be tested for correctness
*/
/*
 * Variant of mulmv that skips the accumulation whenever the Kronecker
 * element is exactly zero (both .re and .im zero) -- an optimization for
 * the sparse structure introduced by identity factors.  The author flagged
 * this routine "to be tested for correctness".
 * s and x are declared outside the loop but listed private(s,x), so each
 * OpenMP thread works on its own copies; s is reset per row.
 */
void mulmv_(kronecker& k, cvector_t& v, cvector_t& r)
{
complex_t s; // = 0;
complex_t x; // = 0;
#pragma omp parallel for private(s,x) schedule(static)
for (int i=0; i<v.size(); i++)
{
s = 0;
for (int j=0; j<v.size(); j++)
{
x = k.get(i,j);
//if ((x.real() != 0) || (x.imag() != 0))
if ((x.re != 0) || (x.im != 0))
s += v[j]*x;
}
r[i] = s;
}
}
/*
 * Blocked, accumulating matrix-vector product: adds the partial product of
 * the sub-block rows [block_ib, block_ie) x columns [block_jb, block_je)
 * of K with v into r.  r[i] is read-modify-written, so the caller must
 * initialize r and must not run different column blocks of the same rows
 * concurrently.
 */
void mulmv(kronecker& k, cvector_t& v, cvector_t& r, size_t block_ib, size_t block_ie, size_t block_jb, size_t block_je)
{
for (int i=block_ib; i<block_ie; i++)
{
complex_t s = r[i];
for (int j=block_jb; j<block_je; j++)
s += v[j]*(k.get(i,j));
r[i] = s;
}
}
} // namespace qx
} // namespace linalg
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods takes a 24 bit image and reduces
% the number of colors so it can be displayed on raster device with less
% bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes a standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinate of two opposite vertices (vertex
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.
% Assignment defines the output image's color map and sets each pixel's
% color by reclassification in the reduced tree.  Our goal is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax.  If
% colors components in the input image are quantized to k-bit precision,
% so that Cmax= 2k-1, the tree would need k levels below the root node to
% allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typdef declarations.
*/
/*
  Working-precision pixel: one double per channel (red, green, blue, alpha);
  the color channels may be alpha-premultiplied (see AssociateAlphaPixel).
*/
typedef struct _DoublePixelPacket
{
double
red,
green,
blue,
alpha;
} DoublePixelPacket;
/*
  One node of the color-description tree.  child[] has 16 slots: the child
  index is one bit per RGB channel (8 octants), with an optional 4th alpha
  bit appended when alpha is associated (see ColorToNodeId).
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
double
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
/*
  Link in the node allocation chain: nodes are carved out of arrays of
  NodesInAList entries, chained through 'next'.
*/
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
/*
  Quantization working state: tree root, node storage, pruning thresholds,
  closest-color search target/distance, and the error[]/weights[] queues
  (presumably the error-diffusion history used by DitherImage -- confirm).
*/
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
double
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
double
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *,ExceptionInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *,ExceptionInfo *),
SetGrayscaleImage(Image *,ExceptionInfo *);
static size_t
DefineImageColormap(Image *,CubeInfo *,NodeInfo *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
/*
  AcquireQuantizeInfo() allocates a QuantizeInfo structure and initializes
  it to defaults; when image_info is non-NULL, the dither method (including
  any "dither" image option) and the measure-error flag are seeded from it.
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *option;

  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  quantize_info->dither_method=image_info->dither == MagickFalse ?
    NoDitherMethod : RiemersmaDitherMethod;
  option=GetImageOption(image_info,"dither");
  if (option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  Load a pixel into a DoublePixelPacket, premultiplying the color channels
  by normalized alpha when the cube has alpha association enabled and the
  pixel is not fully opaque; otherwise the channels are copied straight.
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha != MagickFalse) &&
      (GetPixelAlpha(image,pixel) != OpaqueAlpha))
    {
      /* Premultiply: scale each color component by normalized alpha. */
      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
      alpha_pixel->red=alpha*GetPixelRed(image,pixel);
      alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
      alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  /* Straight copy: alpha association disabled or fully opaque pixel. */
  alpha_pixel->red=(double) GetPixelRed(image,pixel);
  alpha_pixel->green=(double) GetPixelGreen(image,pixel);
  alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}
/*
  PixelInfo variant of AssociateAlphaPixel(): fill a DoublePixelPacket from
  a PixelInfo, premultiplying the color channels by normalized alpha when
  alpha association is enabled and the pixel is not fully opaque.
*/
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha != MagickFalse) &&
      (pixel->alpha != OpaqueAlpha))
    {
      /* Premultiply: scale each color component by normalized alpha. */
      alpha=(double) (QuantumScale*pixel->alpha);
      alpha_pixel->red=alpha*pixel->red;
      alpha_pixel->green=alpha*pixel->green;
      alpha_pixel->blue=alpha*pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  /* Straight copy: alpha association disabled or fully opaque pixel. */
  alpha_pixel->red=(double) pixel->red;
  alpha_pixel->green=(double) pixel->green;
  alpha_pixel->blue=(double) pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    blue_bit,
    green_bit,
    node_id,
    red_bit;

  /*
    Form the child index from bit `index` of each 8-bit channel: red is
    bit 0, green bit 1, blue bit 2, and (when associated) alpha bit 3.
  */
  red_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) &
    0x01);
  green_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) &
    0x01);
  blue_bit=(size_t) ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) &
    0x01);
  node_id=red_bit | (green_bit << 1) | (blue_bit << 2);
  if (cube_info->associate_alpha != MagickFalse)
    node_id|=((size_t) ((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >>
      index) & 0x1)) << 3;
  return(node_id);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() rebuilds image->colors as it walks the tree. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* each parallel row works on a private copy of the cube state */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* count = length of the run of pixels identical to q at x */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* seed with a distance larger than any 4-channel squared error */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          /* assign the whole run to the colormap entry just found */
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  /* special case: force a two-entry black/white colormap for bilevel gray */
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      intensity=GetPixelInfoLuma(image->colormap+0) > QuantumRange/2.0 ? 0.0 :
        QuantumRange;
      if (image->colors > 1)
        {
          intensity=0.0;
          if (GetPixelInfoLuma(image->colormap+0) >
              GetPixelInfoLuma(image->colormap+1))
            intensity=(double) QuantumRange;
        }
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  /* restore the caller's colorspace if quantization transformed it */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the storage_class phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, storage_class scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  /*
    Associate alpha with color only when the image blends alpha; a
    two-color grayscale quantization never considers alpha.
  */
  associate_alpha=MagickFalse;
  if (image->alpha_trait == BlendPixelTrait)
    associate_alpha=MagickTrue;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  /* center of the full color cube; halved at each level of descent */
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: classify rows to the full MaxTreeDepth until the color
    budget (cube_info->maximum_colors) is exceeded.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /* count = length of the run of pixels identical to p at x */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* track the center of the sub-cube chosen at this level */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* too many colors: prune and switch to the shallower second pass */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Second pass: classify the remaining rows at the (possibly pruned)
    depth cube_info->depth rather than MaxTreeDepth.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  /* MagickFalse if either row loop stopped early (I/O error or cancel) */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if image info is NULL a new one.
%
% o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /*
    Allocate a default-initialized structure; copy the caller's settings
    only when a source structure was supplied.
  */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info != (QuantizeInfo *) NULL)
    {
      clone_info->number_colors=quantize_info->number_colors;
      clone_info->tree_depth=quantize_info->tree_depth;
      clone_info->dither_method=quantize_info->dither_method;
      clone_info->colorspace=quantize_info->colorspace;
      clone_info->measure_error=quantize_info->measure_error;
    }
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        pixel;

      register double
        alpha,
        beta,
        distance;

      register DoublePixelPacket
        *magick_restrict q;

      register PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      /* compare alpha-premultiplied components when alpha is associated */
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      /*
        Accumulate squared distance channel by channel, abandoning this
        candidate as soon as it exceeds the best distance found so far.
      */
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  /* new best match: record distance and colormap index */
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    info;

  /*
    Compress the colormap by re-quantizing a palette image to its own
    color count, which merges duplicate and drops unused entries.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&info);
  info.number_colors=image->colors;
  info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&info,image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero. DefineImageColormap() returns the number of
% colors in the image colormap.
%
% The format of the DefineImageColormap method is:
%
% size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      /* alpha = 1/number_unique, the normalizer for the channel sums */
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /* un-premultiply: divide the color sums by the mean alpha */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* remember the most-populated translucent entry */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with an image.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.  A while loop (rather than the
    original do/while) does not dereference node_queue before checking
    it, so an empty node bank list is tolerated; behavior is otherwise
    identical.
  */
  while (cube_info->node_queue != (Nodes *) NULL)
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  }
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  /*
    Invalidate the signature before releasing the structure so stale
    pointers fail the MagickCoreSignature assertion on reuse.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release each thread's error-diffusion buffer, then the pointer table.
    The resource limit is queried once instead of on every loop iteration
    (it is invariant across the loop).
  */
  assert(pixels != (DoublePixelPacket **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixel_set;

  register ssize_t
    thread;

  size_t
    number_threads;

  /*
    Allocate one buffer per worker thread; each buffer holds two rows
    (current and previous) of `count` error-diffusion packets.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_set=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_set));
  if (pixel_set == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixel_set,0,number_threads*sizeof(*pixel_set));
  for (thread=0; thread < (ssize_t) number_threads; thread++)
  {
    pixel_set[thread]=(DoublePixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixel_set));
    if (pixel_set[thread] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixel_set));
  }
  return(pixel_set);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
/*
  Map a pixel to its slot in the closest-color cache: the top CacheShift
  bits of each 8-bit channel are packed into one index (red in the lowest
  bit group, then green, blue, and optionally alpha).
*/
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  /* optional scale (0..1) applied to the diffused error */
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* private copy of the cube state for this row */
    cube=(*cube_info);
    /* two error rows per thread, alternating by row parity */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* serpentine scan: odd rows walk right-to-left (v flips direction) */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /* add diffused error: 7/16 left, 1/16 up-right, 5/16 up, 3/16 up-left
         (directions relative to the serpentine scan) */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /* memoize the closest-color search in cube.cache (-1 = empty slot) */
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) cube.cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image));
      if (cube.quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),
            q+u*GetPixelChannels(image));
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),
            q+u*GetPixelChannels(image));
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),
            q+u*GetPixelChannels(image));
          if (cube.associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),
              q+u*GetPixelChannels(image));
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
      /*
        Store the error.
      */
      AssociateAlphaPixelInfo(&cube,image->colormap+index,&color);
      current[u].red=pixel.red-color.red;
      current[u].green=pixel.green-color.green;
      current[u].blue=pixel.blue-color.blue;
      if (cube.associate_alpha != MagickFalse)
        current[u].alpha=pixel.alpha-color.alpha;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
            image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  pixels=DestroyPixelThreadSet(pixels);
  return(MagickTrue);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int,
ExceptionInfo *);
/*
  Riemersma() recursively traces a Hilbert curve of order `level` over the
  image, invoking RiemersmaDither() once per curve step so that quantization
  error is diffused along the curve rather than in raster order.  The
  `direction` argument selects the orientation of the current (sub-)curve and
  is encoded with GravityType constants.  The exact sequence of recursive
  calls and dither steps in each case implements the Hilbert-curve
  construction rules -- the statement order is significant and must not be
  changed.  RiemersmaDither() both dithers the current pixel and advances
  the cursor one step in the given direction.
*/
static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info,
  const size_t level,const unsigned int direction,ExceptionInfo *exception)
{
  if (level == 1)
    /*
      Base case: a 2x2 cell.  Dither three pixels, stepping in the three
      directions that complete this cell for the given orientation.
    */
    switch (direction)
    {
      case WestGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        break;
      }
      default:
        break;
    }
  else
    /*
      Recursive case: emit the four rotated/reflected sub-curves of order
      level-1 joined by single dither steps, per the Hilbert construction.
    */
    switch (direction)
    {
      case WestGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        break;
      }
      case EastGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        break;
      }
      case NorthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,EastGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,NorthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        break;
      }
      case SouthGravity:
      {
        Riemersma(image,image_view,cube_info,level-1,EastGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,NorthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,WestGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,SouthGravity,
          exception);
        (void) RiemersmaDither(image,image_view,cube_info,SouthGravity,
          exception);
        Riemersma(image,image_view,cube_info,level-1,WestGravity,
          exception);
        break;
      }
      default:
        break;
    }
}
/*
  RiemersmaDither() dithers the single pixel at the cube's current cursor
  (p->x, p->y), when that cursor lies inside the image, and then advances
  the cursor one step in `direction`.  The accumulated error queue is added
  to the pixel before it is mapped to the closest colormap entry, and the
  new quantization error is pushed onto the tail of the queue.  Returns
  MagickFalse on pixel-cache failure or when the progress monitor cancels.
*/
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
  CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  DoublePixelPacket
    color,
    pixel;

  MagickBooleanType
    proceed;

  register CubeInfo
    *p;

  size_t
    index;

  p=cube_info;
  if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
      (p->y >= 0) && (p->y < (ssize_t) image->rows))
    {
      register Quantum
        *magick_restrict q;

      register ssize_t
        i;

      /*
        Distribute error: add the weighted error queue to the pixel.
      */
      q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      AssociateAlphaPixel(image,cube_info,q,&pixel);
      for (i=0; i < ErrorQueueLength; i++)
      {
        pixel.red+=p->weights[i]*p->error[i].red;
        pixel.green+=p->weights[i]*p->error[i].green;
        pixel.blue+=p->weights[i]*p->error[i].blue;
        if (cube_info->associate_alpha != MagickFalse)
          pixel.alpha+=p->weights[i]*p->error[i].alpha;
      }
      /* Clamp accumulated values back into the valid quantum range. */
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      /*
        Look up (or compute and cache) the closest colormap entry for this
        color; the cache maps quantized colors to colormap indexes.
      */
      i=CacheOffset(cube_info,&pixel);
      if (p->cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=p->root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(cube_info,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          p->target=pixel;
          p->distance=(double) (4.0*(QuantumRange+1.0)*((double)
            QuantumRange+1.0)+1.0);
          ClosestColor(image,p,node_info->parent);
          p->cache[i]=(ssize_t) p->color_number;
        }
      /*
        Assign pixel to closest colormap entry.
      */
      index=(size_t) p->cache[i];
      if (image->storage_class == PseudoClass)
        SetPixelIndex(image,(Quantum) index,q);
      if (cube_info->quantize_info->measure_error == MagickFalse)
        {
          SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q);
          SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q);
          SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q);
          if (cube_info->associate_alpha != MagickFalse)
            SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q);
        }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Propagate the error as the last entry of the error queue (shift the
        queue left by one, then append the new error).
      */
      (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  /* Advance the Hilbert-curve cursor one step in the requested direction. */
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}
/*
  DitherImage() distributes quantization error across the image.  Riemersma
  dithering walks a Hilbert curve covering the image; any other dither
  method is delegated to FloydSteinbergDither().
*/
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info,exception));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  /*
    Compute the curve order: smallest depth such that a 2^depth x 2^depth
    grid covers max(columns, rows).
  */
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception);
  /* Final step dithers the last pixel without moving the cursor usefully. */
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a few number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
double
sum,
weight;
register ssize_t
i;
size_t
length;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither_method == NoDitherMethod)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight);
weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
}
/*
Normalize the weighting factors.
*/
weight=0.0;
for (i=0; i < ErrorQueueLength; i++)
weight+=cube_info->weights[i];
sum=0.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]/=weight;
sum+=cube_info->weights[i];
}
cube_info->weights[0]+=1.0-sum;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the storage_class the node resides.
%
*/
/*
  GetNodeInfo() returns a zero-initialized node from the cube's node pool,
  allocating a fresh pool (queue of NodesInAList nodes) when the current one
  is exhausted.  Returns NULL on allocation failure.  Fix: the pool header
  (`nodes`) is no longer leaked when the node-array allocation fails.
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /* was a leak: release the queue header before bailing out */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      /* Push the new pool onto the cube's queue of pools. */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageQuantizeError() measures the difference between the original and
  quantized images by summing, over every pixel, the squared per-channel
  distance between the pixel and its colormap entry.  It fills in
  image->error (mean error per pixel, normalized mean and maximum error)
  and returns MagickTrue.  DirectClass images have no colormap, so their
  error is defined as zero.  Fix: the per-sample normalization now uses
  PerceptibleReciprocal() so a degenerate (zero-pixel) image cannot divide
  by zero.
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  /*
    `area` is the reciprocal of the sample count (3 channels per pixel);
    PerceptibleReciprocal() guards against a zero-pixel image.
  */
  area=PerceptibleReciprocal(3.0*image->columns*image->rows);
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          /* Weight each side of the comparison by its own opacity. */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel*area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error*area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
/*
  GetQuantizeInfo() initializes a QuantizeInfo structure to its defaults:
  256 colors, Riemersma dithering, undefined colorspace, no error
  measurement.  Fix: validate the argument with assert() before it is used,
  consistent with the other public entry points in this module.
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  assert(quantize_info != (QuantizeInfo *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% K m e a n s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KmeansImage() applies k-means color reduction to an image. This is a
% colorspace clustering or segmentation technique.
%
% The format of the KmeansImage method is:
%
% MagickBooleanType KmeansImage(Image *image,const size_t number_colors,
% const size_t max_iterations,const double tolerance,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_colors: number of colors to use as seeds.
%
% o max_iterations: maximum number of iterations while converging.
%
% o tolerance: the maximum tolerance.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Per-thread, per-cluster accumulator for one k-means iteration: channel
  sums (QuantumScale-normalized), member count, and total distortion.
*/
typedef struct _KmeansInfo
{
  double
    red,         /* sum of normalized red values of member pixels */
    green,       /* sum of normalized green values */
    blue,        /* sum of normalized blue values */
    alpha,       /* sum of normalized alpha values */
    black,       /* sum of normalized black (CMYK) values */
    count,       /* number of pixels assigned to this cluster */
    distortion;  /* sum of squared distances of members to the centroid */
} KmeansInfo;
/*
  Release the per-thread k-means accumulators: each thread's cluster array
  first, then the array of pointers itself.  Always returns NULL so callers
  can write `set=DestroyKmeansThreadSet(set);`.
*/
static KmeansInfo **DestroyKmeansThreadSet(KmeansInfo **kmeans_info)
{
  register ssize_t
    n;

  assert(kmeans_info != (KmeansInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (kmeans_info[n] == (KmeansInfo *) NULL)
      continue;
    kmeans_info[n]=(KmeansInfo *) RelinquishMagickMemory(kmeans_info[n]);
  }
  return((KmeansInfo **) RelinquishMagickMemory(kmeans_info));
}
static KmeansInfo **AcquireKmeansThreadSet(const size_t number_colors)
{
KmeansInfo
**kmeans_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
kmeans_info=(KmeansInfo **) AcquireQuantumMemory(number_threads,
sizeof(*kmeans_info));
if (kmeans_info == (KmeansInfo **) NULL)
return((KmeansInfo **) NULL);
(void) memset(kmeans_info,0,number_threads*sizeof(*kmeans_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
kmeans_info[i]=(KmeansInfo *) AcquireQuantumMemory(number_colors,
sizeof(**kmeans_info));
if (kmeans_info[i] == (KmeansInfo *) NULL)
return(DestroyKmeansThreadSet(kmeans_info));
}
return(kmeans_info);
}
/*
  KmeansMetric() returns the squared distance, in QuantumScale-normalized
  space, between image pixel `p` and colormap entry `q`.  When alpha is
  present it contributes its own term and also attenuates the color terms
  through `gamma`; for CMYK the black channel does likewise.
*/
static inline double KmeansMetric(const Image *magick_restrict image,
  const Quantum *magick_restrict p,const PixelInfo *magick_restrict q)
{
  double
    gamma,
    metric,
    pixel;

  gamma=1.0;
  metric=0.0;
  if ((image->alpha_trait != UndefinedPixelTrait) ||
      (q->alpha_trait != UndefinedPixelTrait))
    {
      double
        que_alpha;

      /* Treat a missing alpha on the colormap side as fully opaque. */
      que_alpha=q->alpha_trait != UndefinedPixelTrait ? q->alpha : OpaqueAlpha;
      pixel=QuantumScale*(GetPixelAlpha(image,p)-que_alpha);
      metric+=pixel*pixel;
      /* Transparent pixels contribute less to the color distance. */
      gamma*=QuantumScale*GetPixelAlpha(image,p);
      gamma*=QuantumScale*que_alpha;
    }
  if (image->colorspace == CMYKColorspace)
    {
      pixel=QuantumScale*(GetPixelBlack(image,p)-q->black);
      metric+=gamma*pixel*pixel;
      gamma*=QuantumScale*(QuantumRange-GetPixelBlack(image,p));
      gamma*=QuantumScale*(QuantumRange-q->black);
    }
  /* Weight the alpha/black terms relative to the three color terms. */
  metric*=3.0;
  pixel=QuantumScale*(GetPixelRed(image,p)-q->red);
  if ((image->colorspace == HSLColorspace) ||
      (image->colorspace == HSBColorspace) ||
      (image->colorspace == HWBColorspace))
    {
      /*
        The first channel is a circular hue in these colorspaces; fold and
        rescale the difference.  NOTE(review): differences below -0.5 are
        not folded symmetrically here -- confirm this matches the intended
        hue wrap-around handling.
      */
      if (fabs((double) pixel) > 0.5)
        pixel-=0.5;
      pixel*=2.0;
    }
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelGreen(image,p)-q->green);
  metric+=gamma*pixel*pixel;
  pixel=QuantumScale*(GetPixelBlue(image,p)-q->blue);
  metric+=gamma*pixel*pixel;
  return(metric);
}
/*
  KmeansImage() reduces an image to `number_colors` colors by iterative
  k-means clustering: clusters are seeded (from an octree quantization, an
  explicit semicolon-separated color list, or random colors), then refined
  until the distortion change falls below `tolerance` or `max_iterations`
  is reached.  Returns MagickFalse on failure.
*/
MagickExport MagickBooleanType KmeansImage(Image *image,
  const size_t number_colors,const size_t max_iterations,const double tolerance,
  ExceptionInfo *exception)
{
#define KmeansImageTag  "Kmeans/Image"
#define RandomColorComponent(info)  (QuantumRange*GetPseudoRandomValue(info))

  CacheView
    *image_view;

  const char
    *colors;

  double
    previous_tolerance;

  KmeansInfo
    **kmeans_pixels;

  MagickBooleanType
    verbose,
    status;

  register ssize_t
    n;

  size_t
    number_threads;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=AcquireImageColormap(image,number_colors,exception);
  if (status == MagickFalse)
    return(status);
  colors=GetImageArtifact(image,"kmeans:seed-colors");
  if (colors == (const char *) NULL)
    {
      CubeInfo
        *cube_info;

      QuantizeInfo
        *quantize_info;

      size_t
        colors,  /* NOTE: shadows the outer `colors` artifact string */
        depth;

      /*
        Seed clusters from color quantization.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->number_colors=number_colors;
      quantize_info->dither_method=NoDitherMethod;
      /* depth ~ log4(number_colors): each octree level splits by 4 here. */
      colors=number_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      cube_info=GetCubeInfo(quantize_info,depth,number_colors);
      if (cube_info == (CubeInfo *) NULL)
        {
          quantize_info=DestroyQuantizeInfo(quantize_info);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ClassifyImageColors(cube_info,image,exception);
      if (status != MagickFalse)
        {
          if (cube_info->colors > cube_info->maximum_colors)
            ReduceImageColors(image,cube_info);
          image->colors=0;
          status=DefineImageColormap(image,cube_info,cube_info->root);
        }
      DestroyCubeInfo(cube_info);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      if (status == MagickFalse)
        return(status);
    }
  else
    {
      char
        color[MagickPathExtent];

      register const char
        *p;

      /*
        Seed clusters from color list (e.g. red;green;blue).
      */
      for (n=0, p=colors; n < (ssize_t) image->colors; n++)
      {
        register const char
          *q;

        /* Isolate the next semicolon-delimited color token. */
        for (q=p; *q != '\0'; q++)
          if (*q == ';')
            break;
        (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
          MagickPathExtent));
        (void) QueryColorCompliance(color,AllCompliance,image->colormap+n,
          exception);
        if (*q == '\0')
          {
            n++;
            break;
          }
        p=q+1;
      }
      if (n < (ssize_t) image->colors)
        {
          RandomInfo
            *random_info;

          /*
            Seed clusters from random values.
          */
          random_info=AcquireRandomInfo();
          for ( ; n < (ssize_t) image->colors; n++)
          {
            (void) QueryColorCompliance("#000",AllCompliance,image->colormap+n,
              exception);
            image->colormap[n].red=RandomColorComponent(random_info);
            image->colormap[n].green=RandomColorComponent(random_info);
            image->colormap[n].blue=RandomColorComponent(random_info);
            /* NOTE(review): alpha randomized only when alpha_trait !=
               BlendPixelTrait -- looks inverted; confirm intent. */
            if (image->alpha_trait != BlendPixelTrait)
              image->colormap[n].alpha=RandomColorComponent(random_info);
            if (image->colorspace == CMYKColorspace)
              image->colormap[n].black=RandomColorComponent(random_info);
          }
          random_info=DestroyRandomInfo(random_info);
        }
    }
  /*
    Iterative refinement.
  */
  kmeans_pixels=AcquireKmeansThreadSet(number_colors);
  if (kmeans_pixels == (KmeansInfo **) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  previous_tolerance=0.0;
  verbose=IsStringTrue(GetImageArtifact(image,"debug"));
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (n=0; n < (ssize_t) max_iterations; n++)
  {
    double
      distortion;

    register ssize_t
      i;

    ssize_t
      y;

    /* Reset every thread's per-cluster accumulators for this iteration. */
    for (i=0; i < (ssize_t) number_threads; i++)
      (void) memset(kmeans_pixels[i],0,image->colors*sizeof(*kmeans_pixels[i]));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic) shared(status) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      if (status == MagickFalse)
        continue;
      q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        double
          min_distance;

        register ssize_t
          i;

        ssize_t
          j;

        /*
          Assign each pixel whose mean has the least squared color distance.
        */
        j=0;
        min_distance=KmeansMetric(image,q,image->colormap+0);
        for (i=1; i < (ssize_t) image->colors; i++)
        {
          double
            distance;

          /* An (almost) exact match cannot be improved upon. */
          if (min_distance <= MagickEpsilon)
            break;
          distance=KmeansMetric(image,q,image->colormap+i);
          if (distance < min_distance)
            {
              min_distance=distance;
              j=i;
            }
        }
        /* Accumulate the pixel into its cluster (per-thread to avoid
           contention; reduced across threads after the parallel loop). */
        kmeans_pixels[id][j].red+=QuantumScale*GetPixelRed(image,q);
        kmeans_pixels[id][j].green+=QuantumScale*GetPixelGreen(image,q);
        kmeans_pixels[id][j].blue+=QuantumScale*GetPixelBlue(image,q);
        /* NOTE(review): alpha accumulated only when alpha_trait !=
           BlendPixelTrait -- looks inverted; confirm intent. */
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[id][j].alpha+=QuantumScale*GetPixelAlpha(image,q);
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[id][j].black+=QuantumScale*GetPixelBlack(image,q);
        kmeans_pixels[id][j].count++;
        kmeans_pixels[id][j].distortion+=min_distance;
        SetPixelIndex(image,(Quantum) j,q);
        q+=GetPixelChannels(image);
      }
      if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
        status=MagickFalse;
    }
    if (status == MagickFalse)
      break;
    /*
      Reduce sums to [0] entry.
    */
    for (i=1; i < (ssize_t) number_threads; i++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) image->colors; j++)
      {
        kmeans_pixels[0][j].red+=kmeans_pixels[i][j].red;
        kmeans_pixels[0][j].green+=kmeans_pixels[i][j].green;
        kmeans_pixels[0][j].blue+=kmeans_pixels[i][j].blue;
        if (image->alpha_trait != BlendPixelTrait)
          kmeans_pixels[0][j].alpha+=kmeans_pixels[i][j].alpha;
        if (image->colorspace == CMYKColorspace)
          kmeans_pixels[0][j].black+=kmeans_pixels[i][j].black;
        kmeans_pixels[0][j].count+=kmeans_pixels[i][j].count;
        kmeans_pixels[0][j].distortion+=kmeans_pixels[i][j].distortion;
      }
    }
    /*
      Calculate the new means (centroids) of the pixels in the new clusters.
    */
    distortion=0.0;
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        gamma;

      /* PerceptibleReciprocal() guards against empty clusters (count 0). */
      gamma=PerceptibleReciprocal((double) kmeans_pixels[0][i].count);
      image->colormap[i].red=gamma*QuantumRange*kmeans_pixels[0][i].red;
      image->colormap[i].green=gamma*QuantumRange*kmeans_pixels[0][i].green;
      image->colormap[i].blue=gamma*QuantumRange*kmeans_pixels[0][i].blue;
      if (image->alpha_trait != BlendPixelTrait)
        image->colormap[i].alpha=gamma*QuantumRange*kmeans_pixels[0][i].alpha;
      if (image->colorspace == CMYKColorspace)
        image->colormap[i].black=gamma*QuantumRange*kmeans_pixels[0][i].black;
      distortion+=kmeans_pixels[0][i].distortion;
    }
    if (verbose != MagickFalse)
      (void) FormatLocaleFile(stderr,"distortion[%.20g]: %*g %*g\n",(double) n,
        GetMagickPrecision(),distortion,GetMagickPrecision(),
        fabs(distortion-previous_tolerance));
    /* Converged: distortion change fell below the requested tolerance. */
    if (fabs(distortion-previous_tolerance) <= tolerance)
      break;
    previous_tolerance=distortion;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,KmeansImageTag,(MagickOffsetType) n,
          max_iterations);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  kmeans_pixels=DestroyKmeansThreadSet(kmeans_pixels);
  if (image->progress_monitor != (MagickProgressMonitor) NULL)
    (void) SetImageProgress(image,KmeansImageTag,(MagickOffsetType)
      max_iterations-1,max_iterations);
  if (status == MagickFalse)
    return(status);
  /* Push the new indexes/colormap out to the pixel data. */
  return(SyncImage(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const DitherMethod dither_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
% RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
/*
  PosterizeImage() reduces each updatable channel to `levels` discrete
  values (posterization), then quantizes the result down to at most
  levels^3 colors using the requested dither method.  Returns MagickFalse
  on failure.
*/
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const DitherMethod dither_method,ExceptionInfo *exception)
{
#define PosterizeImageTag  "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
  MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* For PseudoClass images, posterize the colormap entries themselves. */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,image,image->colors,1)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Posterize colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double)
          PosterizePixel(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double)
          PosterizePixel(image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double)
          PosterizePixel(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double)
          PosterizePixel(image->colormap[i].alpha);
    }
  /*
    Posterize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Only channels flagged for update are posterized. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait == BlendPixelTrait))
        SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /* Collapse the posterized image to at most levels^3 colors. */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither_method=dither_method;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Delete the given node, folding its color statistics into its parent.
  Children are visited depth-first before the node itself, and a node is
  only merged while the tree still holds more nodes than the requested
  maximum number of colors.
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /*
    Recurse into any populated children first (8-way without alpha,
    16-way when alpha is associated).
  */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) count; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[id]);
  if (cube_info->nodes <= cube_info->maximum_colors)
    return;
  /*
    Fold this node's statistics into its parent and detach it.
  */
  {
    NodeInfo
      *parent;

    parent=node_info->parent;
    parent->number_unique+=node_info->number_unique;
    parent->total_color.red+=node_info->total_color.red;
    parent->total_color.green+=node_info->total_color.green;
    parent->total_color.blue+=node_info->total_color.blue;
    parent->total_color.alpha+=node_info->total_color.alpha;
    parent->child[node_info->id]=(NodeInfo *) NULL;
    cube_info->nodes--;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Collapse every node sitting at the bottom level of the color tree,
  merging its color statistics into its parent via PruneChild().
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /* Visit children first so the deepest level is handled bottom-up. */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) count; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[id]);
  /* Only nodes exactly at the maximum depth are collapsed. */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Remove every node deeper than cube_info->depth, merging its color
  statistics into its parent via PruneChild().
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    count;

  /* Depth-first: prune the subtrees before considering this node. */
  count=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) count; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[id]);
  /* Anything below the configured depth is folded upward. */
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;
  MagickBooleanType
    status;
  size_t
    depth,
    maximum_colors;
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Clamp the requested color count to [1,MaxColormapSize]; a request of
    0 means "use the maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /*
    Grayscale images without blended alpha take a direct colormap path;
    the result of SetGrayscaleImage() is best-effort (return ignored).
  */
  if (image->alpha_trait != BlendPixelTrait)
    {
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetGrayscaleImage(image,exception);
    }
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;
      /*
        Depth of color tree is: Log4(colormap size)+2.  Dithering and a
        blended alpha channel each shave one level to bound memory use.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2))
        depth--;
      if ((image->alpha_trait == BlendPixelTrait) && (depth > 5))
        depth--;
      if (SetImageGray(image,exception) != MagickFalse)
        depth=MaxTreeDepth;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image, then map each pixel to
        its representative.
      */
      if (cube_info->colors > cube_info->maximum_colors)
        ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info,exception);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;
  Image
    *image;
  MagickBooleanType
    proceed,
    status;
  MagickProgressMonitor
    progress_monitor;
  register ssize_t
    i;
  size_t
    depth,
    maximum_colors,
    number_images;
  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  /*
    Clamp the requested color count to [1,MaxColormapSize]; 0 means
    "use the maximum".
  */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;
      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Pass 1: accumulate the colors of every image into one shared cube.
    The per-image progress monitor is suppressed so only the sequence
    level progress is reported.
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Pass 2: reduce the shared cube once, then assign the resulting
        global colormap to each image in the sequence.
      */
      ReduceImageColors(images,cube_info);
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% double *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is current pointer.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
register ssize_t
i;
size_t
n,
number_children;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
/*
  Prune every node whose quantization error does not exceed the current
  pruning threshold; surviving nodes are counted and the smallest
  surviving error becomes the threshold candidate for the next pass.
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    id;

  size_t
    fanout;

  /* Depth-first: reduce the subtrees before this node. */
  fanout=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (id=0; id < (ssize_t) fanout; id++)
    if (node_info->child[id] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[id]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    {
      /* Error small enough: fold this node into its parent. */
      PruneChild(cube_info,node_info);
      return;
    }
  /* Survivor: count it if it defines a color, and track the minimum
     remaining error for the next iteration's threshold. */
  if (node_info->number_unique > 0)
    cube_info->colors++;
  if (node_info->quantize_error < cube_info->next_threshold)
    cube_info->next_threshold=node_info->quantize_error;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except a the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
/*
  qsort() comparator for the flattened quantization-error vector;
  doubles within MagickEpsilon of each other compare equal.

  Fix: the original tested (*p > *q) before the epsilon equality test,
  so a pair differing by 0 < *p-*q <= MagickEpsilon compared as
  "greater" in one direction but "equal" in the other.  qsort() requires
  a consistent total ordering (C11 7.22.5.2), so the equality test must
  come first.
*/
static int QuantizeErrorCompare(const void *error_p,const void *error_q)
{
  const double
    *p,
    *q;

  p=(const double *) error_p;
  q=(const double *) error_q;
  /* Treat near-equal errors as equal before ordering the rest. */
  if (fabs(*q-*p) <= MagickEpsilon)
    return(0);
  return(*p > *q ? 1 : -1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
  MagickBooleanType
    proceed;
  MagickOffsetType
    offset;
  size_t
    span;
  cube_info->next_threshold=0.0;
  if (cube_info->colors > cube_info->maximum_colors)
    {
      double
        *quantize_error;
      /*
        Enable rapid reduction of the number of unique colors: flatten
        and sort all node errors, then seed the first pruning threshold
        so that roughly all but 110% of the target node count is pruned
        in the first pass.  Allocation failure here is non-fatal; the
        loop below simply starts from threshold 0.
      */
      quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes,
        sizeof(*quantize_error));
      if (quantize_error != (double *) NULL)
        {
          (void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
            quantize_error);
          qsort(quantize_error,cube_info->nodes,sizeof(double),
            QuantizeErrorCompare);
          if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
            cube_info->next_threshold=quantize_error[cube_info->nodes-110*
              (cube_info->maximum_colors+1)/100];
          quantize_error=(double *) RelinquishMagickMemory(quantize_error);
        }
    }
  /*
    Repeatedly prune nodes at or below the threshold; Reduce() recounts
    the surviving colors and raises the threshold to the minimum
    remaining error each pass.  A cancelled progress monitor aborts.
  */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest of the colors
% from the reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Replace the colors of image with the closest colors found in
  remap_image.  A color cube is built from the reference image; the
  target image's pixels are then assigned from that cube.
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube;

  MagickBooleanType
    status;

  /*
    Sanity-check arguments.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build the reference color cube.
  */
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube,remap_image,exception);
  if (status == MagickFalse)
    {
      DestroyCubeInfo(cube);
      return(status);
    }
  /*
    Assign every pixel of the target image to its closest reference
    color; the colormap size is the reference image's color count.
  */
  cube->quantize_info->number_colors=cube->colors;
  status=AssignImageColors(image,cube,exception);
  DestroyCubeInfo(cube);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;
  Image
    *image;
  MagickBooleanType
    status;
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        No reference image: create a global colormap for the sequence
        with QuantizeImages() instead.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Assign each image in the sequence to the reference colors; stop
        on the first failure.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator ordering colormap entries by increasing pixel
  intensity.  NOTE(review): the final (int) truncation maps any
  intensity difference of magnitude < 1.0 to 0, so near-equal
  intensities compare as equal; SetGrayscaleImage() appears to rely on
  this only for ordering before its de-duplication pass -- confirm
  before tightening.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;
  PixelInfo
    *color_1,
    *color_2;
  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  /* NULL image: presumably selects a default intensity method --
     verify against GetPixelInfoIntensity(). */
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  /* Clamp to the int range before truncation to avoid overflow. */
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  Convert image to a PseudoClass grayscale image in three phases:
  (1) if not already PseudoClass, build a colormap by mapping each
  pixel's red (gray) quantum to a colormap slot via an intensity lookup
  table; (2) sort the colormap by intensity and collapse duplicate
  entries; (3) rewrite every pixel index through the remap table.
*/
static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    status;
  PixelInfo
    *colormap;
  register ssize_t
    i;
  size_t
    extent;
  ssize_t
    *colormap_index,
    j,
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /* Table must cover both the colormap and the full ScaleQuantumToMap
     range. */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        Phase 1: build the colormap.  Entries start at -1 ("unseen");
        each thread re-checks the entry inside the critical section
        before appending a new colormap slot.
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;
        register ssize_t
          x;
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;
          /* Gray images store the gray value in the red channel. */
          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              /* Re-check: another thread may have filled this slot. */
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Phase 2: sort colormap by intensity and de-duplicate.  The alpha
    field temporarily stores each entry's original index so the remap
    table can be rebuilt after sorting.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    /* Map original index (saved in alpha) to its compacted slot. */
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  /*
    Phase 3: rewrite every pixel's index through the remap table.
  */
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for struct timeval values.
 * Note: *y is modified in place while normalizing the carry/borrow.
 * Returns 1 when the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    int carry;

    /* Borrow from the seconds field when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Push any surplus microseconds into the seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-4 (25-point) 3D variable-coefficient stencil.
 *
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; a 4-point halo is
 * added on every face (+8 per dimension).
 *
 * Fixes relative to the original:
 *   - Nx/Ny/Nz/Nt now have defaults instead of being read uninitialized
 *     when too few arguments are given (UB).
 *   - Both time levels of A and the full halo (index 0 upward) are
 *     initialized; the stencil reads offsets down to i-4/j-4/k-4, and at
 *     t>=1 it reads the halo of the level written at t-1, which the
 *     original never wrote.
 *   - min() -> MIN(): only the MIN macro is defined in this file, so the
 *     original did not compile.
 *   - Top-level A, coef, and tile_size allocations are now freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx = 16+8, Ny = 16+8, Nz = 16+8;  /* defaults: 16^3 interior */
  int Nt = 4;                           /* default time steps */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time levels of the solution array. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m=0; m<2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* Allocate the 13 axis-symmetric coefficient fields. */
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for (m=0; m<13; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size list consumed by source-to-source transformations; the
     sentinel -1 terminates the list. */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations. */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  /* Timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;

  /*
    Initialize every element of both time levels and of the coefficient
    fields, halo included (index 0 upward): the stencil reads halo
    points down to index 0, and at t>=1 it reads the halo of the level
    written at t-1.
  */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);  /* was min(): not defined anywhere */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Release all allocations, including the top-level pointer arrays and
     the tile-size list, which the original leaked. */
  for (i=0; i<Nz; i++) {
    for (j=0; j<Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
ast-dump-openmp-distribute-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp distribute simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp distribute simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp distribute simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp distribute simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp distribute simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:4:1, col:28>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:10:1, col:28>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:17:1, col:40>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:29, col:39>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:38> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:38> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPDistributeSimdDirective {{.*}} <line:24:1, col:40>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:29, col:39>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:38> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:38> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPDistributeSimdDirective {{.*}} <line:31:1, col:40>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:29, col:39>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:38> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:38> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
SpatialAdaptiveAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.c"
#else
#define START_IND(a,b,c) (int)floor((float)(a * c) / b)
#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b)
// #define START_IND(a,b,c) a * c / b
// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0
/*
 * Forward pass of adaptive average pooling for one frame (one batch element).
 * For each output cell (oy, ox) of each of the nslices planes, the input
 * window is [START_IND, END_IND) along each spatial axis, and the output is
 * the plain average of the window.  Input is addressed through explicit
 * strides (strided/strideh/stridew), so a non-contiguous input is supported;
 * output is written densely as nslices x oheight x owidth.
 */
static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(
real *input_p,
real *output_p,
long nslices,
long iwidth,
long iheight,
long owidth,
long oheight,
long stridew,
long strideh,
long strided)
{
long plane;
#pragma omp parallel for private(plane)
for (plane = 0; plane < nslices; plane++)
{
long oy, ox;
for (oy = 0; oy < oheight; oy++)
{
/* input row range covered by output row oy */
int y0 = START_IND(oy, oheight, iheight);
int y1 = END_IND(oy, oheight, iheight);
int kh = y1 - y0;
for (ox = 0; ox < owidth; ox++)
{
/* input column range covered by output column ox */
int x0 = START_IND(ox, owidth, iwidth);
int x1 = END_IND(ox, owidth, iwidth);
int kw = x1 - x0;
/* top-left corner of the pooling window in the input */
real *src = input_p + plane*strided + y0*strideh + x0*stridew;
/* accumulate the window, then store its average */
real acc = 0;
int dy, dx;
for (dy = 0; dy < kh; dy++)
{
for (dx = 0; dx < kw; dx++)
{
acc += src[dy*strideh + dx*stridew];
}
}
output_p[plane*owidth*oheight + oy*owidth + ox] = acc / kw / kh;
}
}
}
}
/*
 * Forward pass of adaptive average pooling.
 * Accepts a 3D (C x H x W) or 4D (N x C x H x W) input tensor, resizes the
 * output to (C|N x C) x oheight x owidth, and dispatches per-frame work to
 * SpatialAdaptiveAveragePooling_updateOutput_frame.  Input may be
 * non-contiguous: the frame routine receives explicit strides.
 */
void THNN_(SpatialAdaptiveAveragePooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int owidth,
int oheight)
{
/* dimension indices for the 3D case; shifted by one in batch mode */
int dimw = 2;
int dimh = 1;
long nbatch = 1;
long nslices;
long iheight;
long iwidth;
long istride_d;
long istride_h;
long istride_w;
long istride_b; /* batch stride; only assigned (and only used) in the 4D branch */
real *input_data;
real *output_data;
THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
"3D or 4D (batch mode) tensor expected for input, but got: %s");
if (input->nDimension == 4)
{
istride_b = input->stride[0];
nbatch = input->size[0];
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimh-1];
iheight = input->size[dimh];
iwidth = input->size[dimw];
/* strides */
istride_d = input->stride[dimh-1];
istride_h = input->stride[dimh];
istride_w = input->stride[dimw];
/* resize output */
if (input->nDimension == 3)
{
THTensor_(resize3d)(output, nslices, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data,
nslices,
iwidth, iheight,
owidth, oheight,
istride_w,istride_h,
istride_d);
}
else
{
long p;
THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
/* one frame per batch element; input advances by its batch stride,
   output advances densely (output was just resized, hence contiguous) */
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data+p*istride_b, output_data+p*nslices*owidth*oheight,
nslices,
iwidth, iheight,
owidth, oheight,
istride_w,istride_h,
istride_d);
}
}
}
/*
 * Backward pass of adaptive average pooling for one frame.
 * Each output-gradient cell (i, j) is spread uniformly back over its input
 * window [y_start, y_end) x [x_start, x_end): every covered input cell
 * accumulates gradOutput(i, j) / (window area).  Both tensors are addressed
 * densely (nslices x H x W); gradInput must already be zeroed by the caller.
 */
static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(
real *gradInput_p,
real *gradOutput_p,
long nslices,
long iwidth,
long iheight,
long owidth,
long oheight)
{
long k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
real *gradInput_p_k = gradInput_p + k*iwidth*iheight;
real *gradOutput_p_k = gradOutput_p + k*owidth*oheight;
/* calculate average */
long i, j;
for(i = 0; i < oheight; i++)
{
int y_start = START_IND(i, oheight, iheight);
int y_end = END_IND(i, oheight, iheight);
int kH = y_end-y_start;
for(j = 0; j < owidth; j++)
{
int x_start = START_IND(j, owidth, iwidth);
int x_end = END_IND(j, owidth, iwidth);
int kW = x_end-x_start;
/* hoisted out of the window loops: the distributed gradient is
   invariant over the kernel window (same value, same rounding,
   as the original per-element division) */
real grad_delta = gradOutput_p_k[i*owidth + j] / kW / kH;
int x,y;
for(y = y_start; y < y_end; y++)
{
for(x = x_start; x < x_end; x++)
{
/* update gradient */
gradInput_p_k[y*iwidth + x] += grad_delta;
}
}
}
}
}
}
/*
 * Backward pass of adaptive average pooling.
 * Resizes gradInput to match input, zeroes it, and dispatches per-frame work
 * to SpatialAdaptiveAveragePooling_updateGradInput_frame.  gradOutput is made
 * contiguous first because the frame routine indexes it densely.
 * NOTE(review): the frame routine also indexes gradInput densely; this relies
 * on resizeAs producing a contiguous gradInput -- confirm for callers that
 * pass a pre-strided gradInput tensor.
 */
void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput)
{
/* dimension indices for the 3D case; shifted by one in batch mode */
int dimw = 2;
int dimh = 1;
long nbatch = 1;
int nslices;
int iheight;
int iwidth;
int oheight;
int owidth;
real *gradInput_data;
real *gradOutput_data;
/* get contiguous gradOutput (may allocate a copy; freed below) */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->nDimension == 4) {
nbatch = input->size[0];
dimw++;
dimh++;
}
/* sizes */
nslices = input->size[dimh-1];
iheight = input->size[dimh];
iwidth = input->size[dimw];
oheight = gradOutput->size[dimh];
owidth = gradOutput->size[dimw];
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
/* backprop */
if (input->nDimension == 3)
{
THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data, gradOutput_data,
nslices,
iwidth, iheight,
owidth, oheight);
}
else
{
long p;
/* one frame per batch element; both tensors advance densely */
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight,
nslices,
iwidth, iheight,
owidth, oheight);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
}
#endif
#undef START_IND
#undef END_IND |
bi_dir_ctx.h | /*
* Copyright (c) 2018 Intel Corporation. All rights reserved.
* This software is available to you under the BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
 * Bi-directional bandwidth benchmark using per-thread SHMEM contexts.
 * Each OpenMP thread creates a private context and issues window_size puts of
 * `len` bytes to its partner PE, quiescing once per window.  Structure:
 *   1) one-time sanity checks (same-PE warning, hostname validation),
 *   2) a warmup parallel region (not timed),
 *   3) the measured region -- the streaming node starts the clock on the
 *      master thread after an omp barrier; the non-streaming node runs the
 *      identical traffic loop untimed,
 *   4) `end` is read after a final shmem_barrier_all, so the reported time
 *      includes the closing barrier -- this matches the streaming-node
 *      convention of the surrounding benchmark suite.
 * NOTE(review): `start`/`end` are shared and written by the master thread
 * only; `end` is written after the parallel region, so no race is visible
 * here.
 */
static inline void bi_bw_ctx (int len, perf_metrics_t *metric_info)
{
double start = 0.0, end = 0.0;
int dest = partner_node(metric_info);
unsigned long int i, j;
static int check_once = 0;  /* one-time guard: validation runs on first call only */
if (!check_once) {
/* check to see whether sender and receiver are the same process */
if (dest == metric_info->my_node) {
fprintf(stderr, "Warning: Sender and receiver are the same "
"process (%d)\n", dest);
}
/* hostname validation for all sender and receiver processes */
int status = check_hostname_validation(metric_info);
if (status != 0) return;
check_once++;
}
shmem_barrier_all();
/* warmup region: same traffic pattern as the measured loop, not timed */
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;  /* private context per thread */
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
/* Exit with success to avoid test failures in automated testing */
shmem_global_exit(0);
}
for (i = 0; i < metric_info->warmup; i++) {
for(j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_quiet(ctx);  /* complete the whole window before the next one */
}
shmem_ctx_destroy(ctx);
}
shmem_barrier_all();
if (streaming_node(metric_info)) {
/* measured region: master thread starts the clock after all threads
   have created their contexts (omp barrier) */
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
/* Exit with success to avoid test failures in automated testing */
shmem_global_exit(0);
}
#pragma omp barrier
#pragma omp master
{
start = perf_shmemx_wtime();
}
for (i = 0; i < metric_info->trials; i++) {
for(j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
} else {
/* non-streaming node: drive the same traffic, untimed */
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
/* Exit with success to avoid test failures in automated testing */
shmem_global_exit(0);
}
for (i = 0; i < metric_info->trials; i++) {
for(j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
if (streaming_node(metric_info)) {
/* stop the clock after the global barrier and report */
end = perf_shmemx_wtime();
calc_and_print_results(end, start, len, metric_info);
}
shmem_barrier_all();
}
|
omp_dsyr2k_batch.c | /**
* @file omp_dsyr2k_batch.c
*
* @brief BBLAS omp_dsyr2k_batch double routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @generated from ./bblas_omp/omp_zsyr2k_batch.c normal z -> d, Mon Jun 6 09:44:14 2016
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define REAL
/**
Purpose
-------
<b>dsyr2k_batch</b> is a batch version of dsyr2k.
It performs one of the matrix-matrix operations
arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T + alpha[i]*arrayB[i]*arrayA[i]**T +
beta[i]*arrayC[i], or
arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] + alpha[i]*arrayB[i]**T *arrayA[i] +
beta[i]*arrayC[i],
where alpha[i] and beta[i] are scalars, arrayC[i] is an N[i] by N[i] sym-
metric matrix and arrayA[i] and arrayB[i] are N[i] by K[i] matrices in the
first case and K[i] by N[i] matrices in the second case.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayB, arrayC, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of uplo[0], trans[0], N[0], K[0],
alpha[0], beta[0], lda[0], ldb[0], and ldc[0] are used for all computations.
Parameters
----------
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the upper or
lower triangular part of the matrix arrayC[i] is to
be referenced as follows:
- = 'BblasUpper' Only the upper triangular part of
the matrix is to be referenced.
- = 'BblasLower' Only the lower triangular part of
the matrix is to be referenced.
@param[in]
trans Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the operation to be
performed as follows:
- = 'BblasNoTrans' arrayC[i] = alpha[i]*arrayA[i]*arrayB[i]**T +
alpha[i]*arrayB[i]*arrayA[i]**T + beta[i]*arrayC[i]
- = 'BblasTrans' arrayC[i] = alpha[i]*arrayA[i]**T *arrayB[i] +
alpha[i]*arrayB[i]**T *arrayA[i] + beta[i]*arrayC[i].
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of rows and columns of the matrix
arrayC[i]. N[i] must be greater than zero.
@param[in]
K Array of <tt>int</tt>.
On entry with trans[i] = 'BblasNoTrans', K[i] specifies the
number of columns of the matrices arrayA[i] and arrayB[i],
and upon entry with trans[i] = 'BblasTrans',
K[i] specifies the number of rows of the matrices arrayA[i] and arrayB[i].
K[i] must be greater than zero.
@param[in]
alpha Array of <tt>double</tt>.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a DOUBLE PRECISION matrix of
dimension lda[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayA[i] must contain the elements of arrayA[i], otherwise
the leading K[i] by N[i] part of the arrayA[i] must contain the
elements of arrayA[i].
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
lda[i] must be at least max( 1, N[i] ), otherwise lda[i] must be at
least max( 1, K[i] ).
@param[in]
arrayB Array of pointers.
Each element arrayB[i] is a pointer to a DOUBLE PRECISION matrix of
dimension ldb[i] by Ka[i],
where Ka[i] = K[i] when transA[i] = BblasNoTrans and is N[i] otherwise.
Before entry with transA[i] = BblasNoTrans, the leading N[i] by K[i]
part of the arrayB[i] must contain the elements of arrayB[i], otherwise
the leading K[i] by N[i] part of the arrayB[i] must contain the
elements of arrayB[i].
@param[in]
ldb Array of <tt>int</tt>.
On entry, ldb[i] specifies the first dimension of arrayB[i] as declared
in the calling (sub) program. When transA[i] = BblasNoTrans then
ldb[i] must be at least max( 1, N[i] ), otherwise ldb[i] must be at
least max( 1, K[i] ).
@param[in]
beta Array of <tt>double</tt>.
When beta[i] is set to zero arrayC[i] need not be set on input.
@param[in,out]
arrayC Array of pointers.
Each elements arrayC[i] is a pointer to a DOUBLE PRECISION matrix of
dimension ldc[i] by N[i].
Before entry with uplo[i] = 'BblasUpper', the leading
N[i] by N[i] upper triangular part of the arrayC[i] must con-
tain the upper triangular part of the symmetric
matrix and the strictly lower triangular part of arrayC[i]
is not referenced. On exit, the upper triangular
part of the arrayC[i] is overwritten by the upper triangular part
of the updated matrix.
Before entry with uplo[i] = 'BblasLower', the leading N[i] by N[i] lower
triangular part of the arrayC[i] must contain the lower
triangular part of the symmetric matrix and the
strictly upper triangular part of arrayC[i] is not referenced.
On exit, the lower triangular part of the
arrayC[i] is overwritten by the lower triangular part
of the updated matrix.
@param[in]
ldc Array of <tt>int</tt>.
On entry, ldc[i] specifies the first dimension of arrayC[i] as declared
in the calling (sub) program. Each element ldc must be at least max( 1, N[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith dsyr2k in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
void omp_dsyr2k_batch(
const enum BBLAS_UPLO *uplo, const enum BBLAS_TRANS *trans,
const int *N, const int *K, const double *alpha,
const double **arrayA, const int *lda,
const double **arrayB, const int *ldb,
const double *beta, double **arrayC,
const int *ldc, const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
/*Local variables */
int first_index = 0;
int batch_iter;
int LDA, LDB;
char func_name[15] = "dsyr2k_batch";
/* Check input arguments */
if (batch_count < 0)
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
}
if (batch_opts == BBLAS_FIXED)
{
if ((uplo[first_index] != BblasUpper) &&
(uplo[first_index] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_UPLO;
}
return;
}
if ((trans[first_index] != BblasNoTrans) &&
(trans[first_index] != BblasTrans) &&
(trans[first_index] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANS, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_TRANS;
}
return;
}
if (N[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_N;
}
return;
}
if (K[first_index] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_K, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_K;
}
return;
}
if (trans[first_index] == BblasNoTrans)
{
LDA = N[first_index];
LDB = N[first_index];
} else
{
LDA = K[first_index];
LDB = K[first_index];
}
if (lda[first_index] < max(1,LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDA;
}
return;
}
if (ldb[first_index] < max(1, LDB))
{
xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDB;
}
return;
}
if (ldc[first_index] < max(1, N[first_index]))
{
xerbla_batch(func_name, BBLAS_ERR_LDC, first_index);
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_ERR_LDC;
}
return;
}
/* particular case */
if (N[first_index] == 0 || K[first_index] == 0 ||
(alpha[first_index] == (double)0.0 ||
beta[first_index] == (double)1.0))
{
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
info[batch_iter] = BBLAS_SUCCESS;
}
return;
}
#pragma omp parallel for private(batch_iter)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/*Call to cblas_dsyr2k */
cblas_dsyr2k(
BblasColMajor,
uplo[first_index],
trans[first_index],
N[first_index],
K[first_index],
(alpha[first_index]),
arrayA[batch_iter],
lda[first_index],
arrayB[batch_iter],
ldb[first_index],
(beta[first_index]),
arrayC[batch_iter],
ldc[first_index]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
} /*END FIXED SIZE FOR LOOP */
}else if (batch_opts == BBLAS_VARIABLE)
{
#pragma omp parallel for private (batch_iter, LDA, LDB)
for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
{
/* Check input arguments */
if ((uplo[batch_iter] != BblasUpper) &&
(uplo[batch_iter] != BblasLower))
{
xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
info[batch_iter] = BBLAS_ERR_UPLO;
continue;
}
if ((trans[batch_iter] != BblasNoTrans) &&
(trans[batch_iter] != BblasTrans) &&
(trans[batch_iter] != BblasConjTrans))
{
xerbla_batch(func_name, BBLAS_ERR_TRANS, batch_iter);
info[batch_iter] = BBLAS_ERR_TRANS;
continue;
}
if (N[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
info[batch_iter] = BBLAS_ERR_N;
continue;
}
if (K[batch_iter] < 0)
{
xerbla_batch(func_name, BBLAS_ERR_K, batch_iter);
info[batch_iter] = BBLAS_ERR_K;
continue;
}
if (trans[batch_iter] == BblasNoTrans)
{
LDA = N[batch_iter];
LDB = N[batch_iter];
} else
{
LDA = K[batch_iter];
LDB = K[batch_iter];
}
if (lda[batch_iter] < max(1, LDA))
{
xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
info[batch_iter] = BBLAS_ERR_LDA;
continue;
}
if (ldb[batch_iter] < max(1, LDB))
{
xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
info[batch_iter] = BBLAS_ERR_LDB;
continue;
}
if (ldc[batch_iter] < max(1, N[batch_iter]))
{
xerbla_batch(func_name, BBLAS_ERR_LDC, batch_iter);
info[batch_iter] = BBLAS_ERR_LDC;
continue;
}
/* particular case */
if (N[batch_iter] == 0 || K[batch_iter] == 0 ||
((alpha[batch_iter] == (double)0.0) &&
beta[batch_iter] == (double)1.0))
{
info[batch_iter] = BBLAS_SUCCESS;
continue;
}
cblas_dsyr2k(
BblasColMajor,
uplo[batch_iter],
trans[batch_iter],
N[batch_iter],
K[batch_iter],
(alpha[batch_iter]),
arrayA[batch_iter],
lda[batch_iter],
arrayB[batch_iter],
ldb[batch_iter],
(beta[batch_iter]),
arrayC[batch_iter],
ldc[batch_iter]);
/* Successful */
info[batch_iter] = BBLAS_SUCCESS;
}
}else
{
xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
}
}
#undef REAL
|
declare_variant_ast_print.c | // RUN: %clang_cc1 -verify -fopenmp -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c -std=c99 -ast-print %s -o - -Wno-openmp-clauses | FileCheck %s
// expected-no-diagnostics

// Test that `#pragma omp declare variant` is parsed and pretty-printed:
// unknown/invalid selector sets and duplicate trait values (xxx, yyy, vvv,
// the repeated ibm) are dropped, and the surviving attributes are printed
// by -ast-print in reverse of their declaration order.
int foo(void);
#pragma omp declare variant(foo) match(xxx={}, yyy={ccc})
#pragma omp declare variant(foo) match(xxx={vvv})
#pragma omp declare variant(foo) match(implementation={vendor(score(0):llvm)}, device={kind(fpga)})
#pragma omp declare variant(foo) match(implementation={vendor(llvm), xxx})
#pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)})
#pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm, xxx, ibm)}, device={kind(cpu, nohost)})
#pragma omp declare variant(foo) match(device={kind(host)})
#pragma omp declare variant(foo) match(device={kind(nohost), xxx})
int bar(void);
// CHECK: int foo();
// CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(nohost)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(device={kind(host)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(5): ibm)}, device={kind(cpu, nohost)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(unknown)}, device={kind(gpu)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(llvm)})
// CHECK-NEXT: #pragma omp declare variant(foo) match(implementation={vendor(score(0): llvm)}, device={kind(fpga)})
// CHECK-NEXT: int bar();
|
communicator.c | #include "communicator.h"
#include "parallel.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
// Process-wide serial communicator: one thread, one gang, no shared context.
static tci_comm _tci_single = {NULL, 1, 0, 1, 0};
tci_comm* const tci_single = &_tci_single;
/* Reset `comm` to a serial communicator: one gang of one thread,
 * with no shared context (a lone thread never synchronizes). */
int tci_comm_init_single(tci_comm* comm)
{
    comm->context = NULL;

    /* This caller is simultaneously thread 0 of 1 and gang 0 of 1. */
    comm->nthread = 1;
    comm->ngang   = 1;
    comm->tid     = 0;
    comm->gid     = 0;

    return 0;
}
/* Initialize `comm` as member `tid` of `nthread` threads, in gang `gid`
 * of `ngang` gangs, all sharing `context` (which may be NULL). */
int tci_comm_init(tci_comm* comm, tci_context* context,
                  unsigned nthread, unsigned tid, unsigned ngang, unsigned gid)
{
    comm->context = context;
    comm->ngang   = ngang;
    comm->gid     = gid;
    comm->nthread = nthread;
    comm->tid     = tid;

    /* Each communicator takes a reference on its shared context. */
    if (context)
    {
        tci_context_attach(comm->context);
    }

    return 0;
}
/* Drop the communicator's reference on its context, if it has one. */
int tci_comm_destroy(tci_comm* comm)
{
    if (!comm->context) return 0;
    return tci_context_detach(comm->context);
}
/* The master of a communicator is the thread with id 0. */
int tci_comm_is_master(const tci_comm* comm)
{
    return !comm->tid;
}
/* Synchronize all threads in `comm`; trivially succeeds when there is
 * no shared context (single-threaded communicator). */
int tci_comm_barrier(tci_comm* comm)
{
    tci_context* ctx = comm->context;
    return ctx ? tci_context_barrier(ctx, comm->tid) : 0;
}
/* Broadcast *object from thread `root` to every thread in `comm`:
 * the root publishes its pointer, the others overwrite theirs. */
int tci_comm_bcast(tci_comm* comm, void** object, unsigned root)
{
    if (!comm->context) return 0;

    return comm->tid == root
         ? tci_context_send(comm->context, comm->tid, *object)
         : tci_context_receive(comm->context, comm->tid, object);
}
/* Non-blocking variant of tci_comm_bcast: same root-to-all pointer
 * broadcast, using the _nowait send/receive primitives. */
int tci_comm_bcast_nowait(tci_comm* comm, void** object, unsigned root)
{
    if (!comm->context) return 0;

    return comm->tid == root
         ? tci_context_send_nowait(comm->context, comm->tid, *object)
         : tci_context_receive_nowait(comm->context, comm->tid, object);
}
// Split the threads of `parent` into `n` gangs and initialize `child` as
// this thread's communicator within its gang.  `type` selects how thread
// ids map onto gangs (TCI_EVENLY, TCI_CYCLIC, TCI_BLOCK_CYCLIC with block
// size `bs`, or TCI_BLOCKED); OR-ing in TCI_NO_CONTEXT suppresses creation
// of per-gang contexts, so the resulting gangs cannot synchronize
// internally.  Returns 0, or EINVAL for an unknown distribution type.
int tci_comm_gang(tci_comm* parent, tci_comm* child,
                  int type, unsigned n, unsigned bs)
{
    unsigned nt = parent->nthread;
    unsigned tid = parent->tid;

    // Trivial splits: a single gang reuses the parent context wholesale;
    // n >= nt makes every thread its own (context-free) gang.
    if (n == 1) return tci_comm_init(child, parent->context, nt, tid, 1, 0);
    if (n >= nt) return tci_comm_init(child, NULL, 1, 0, nt, tid);

    unsigned new_tid = 0;
    unsigned new_nthread = 0;
    unsigned block = 0;

    switch (type & ~TCI_NO_CONTEXT)
    {
        case TCI_EVENLY:
        {
            // Contiguous ranges of threads with sizes as equal as possible.
            block = (n*tid)/nt;
            unsigned block_first = (block*nt+n-1)/n;
            unsigned block_last = ((block+1)*nt+n-1)/n;
            new_tid = tid-block_first;
            new_nthread = block_last-block_first;
        }
        break;
        case TCI_CYCLIC:
        {
            // Round-robin: thread tid joins gang tid % n.
            block = tid%n;
            new_tid = tid/n;
            new_nthread = (nt-block+n-1)/n;
        }
        break;
        case TCI_BLOCK_CYCLIC:
        {
            // Round-robin over blocks of bs consecutive threads.
            block = (tid/bs)%n;
            unsigned nsubblock_tot = nt/bs;
            unsigned nsubblock = nsubblock_tot/n;
            new_tid = ((tid/bs)/n)*bs + (tid%bs);
            new_nthread = nsubblock*bs +
                TCI_MIN(bs, nt-nsubblock*n*bs-block*bs);
        }
        break;
        case TCI_BLOCKED:
        {
            // n contiguous blocks of ceil(nt/n) threads; the caller's bs
            // is ignored and recomputed here.
            bs = (nt+n-1)/n;
            block = tid/bs;
            new_tid = tid-block*bs;
            new_nthread = TCI_MIN(bs, nt-block*bs);
        }
        break;
        default: return EINVAL;
    }

    if (!parent->context || (type & TCI_NO_CONTEXT))
    {
        tci_comm_init(child, NULL, new_nthread, new_tid, n, block);
    }
    else
    {
        // The parent's master (tid 0) broadcasts the address of its
        // stack-resident per-gang context array; each gang's master then
        // creates that gang's context in the shared array.
        tci_context* contexts_buf[n];
        tci_context** contexts = &contexts_buf[0];

        memset(contexts_buf, 0, sizeof(contexts_buf));
        tci_comm_bcast_nowait(parent, (void**)&contexts, 0);

        if (new_tid == 0 && new_nthread > 1)
        {
            tci_context_init(&contexts[block], new_nthread,
                             parent->context->barrier.group_size);
        }

        // First barrier: contexts[] is fully populated before any thread
        // reads its entry.  Second barrier (after init): NOTE(review) —
        // presumably ensures all threads have copied their entry before
        // the root's contexts_buf leaves scope; confirm.
        tci_comm_barrier(parent);

        tci_comm_init(child, contexts[block], new_nthread, new_tid, n, block);

        tci_comm_barrier(parent);
    }

    return 0;
}
#if TCI_USE_OPENMP_THREADS || TCI_USE_PTHREADS_THREADS || TCI_USE_WINDOWS_THREADS
// Divide [0, range.size) among n workers in grain-sized chunks and invoke
// `func` on the piece belonging to worker `idx` (the caller's id).
static void tci_distribute(unsigned n, unsigned idx, tci_comm* comm,
                           tci_range range, tci_range_func func, void* payload)
{
    if (n == 1)
    {
        func(comm, 0, range.size, payload);
        return;
    }

    // Split points are rounded to multiples of the grain size.
    range.grain = TCI_MAX(range.grain, 1);
    uint64_t ngrain = (range.size+range.grain-1)/range.grain;
    uint64_t first = (idx*ngrain)/n;
    uint64_t last = ((idx+1)*ngrain)/n;
    func(comm, first*range.grain, TCI_MIN(last*range.grain, range.size), payload);
}
// Split the range_m x range_n iteration space over `num` workers and run
// `func` on the sub-rectangle of worker `idx`.  tci_partition_2x2 chooses
// the m x n worker-grid shape; workers are numbered column-major in that
// grid (idx_m = idx % m).
static void tci_distribute_2d(unsigned num, unsigned idx, tci_comm* comm,
                              tci_range range_m, tci_range range_n,
                              tci_range_2d_func func, void* payload)
{
    if (num == 1)
    {
        func(comm, 0, range_m.size, 0, range_n.size, payload);
        return;
    }

    unsigned m, n;
    tci_partition_2x2(num, range_m.size, num, range_n.size, num, &m, &n);

    unsigned idx_m = idx % m;
    unsigned idx_n = idx / m;

    // Split points in each dimension are rounded to that dimension's grain.
    range_m.grain = TCI_MAX(range_m.grain, 1);
    range_n.grain = TCI_MAX(range_n.grain, 1);

    uint64_t mgrain = (range_m.size+range_m.grain-1)/range_m.grain;
    uint64_t ngrain = (range_n.size+range_n.grain-1)/range_n.grain;

    uint64_t mfirst = (idx_m*mgrain)/m;
    uint64_t nfirst = (idx_n*ngrain)/n;
    uint64_t mlast = ((idx_m+1)*mgrain)/m;
    uint64_t nlast = ((idx_n+1)*ngrain)/n;

    func(comm, mfirst*range_m.grain, TCI_MIN(mlast*range_m.grain, range_m.size),
         nfirst*range_n.grain, TCI_MIN(nlast*range_n.grain, range_n.size), payload);
}
#elif TCI_USE_TBB_THREADS
// TBB variant: the caller fans out all n pieces as TBB tasks itself and
// waits for them; the `idx` parameter is shadowed by the loop variable and
// effectively unused.  NOTE(review): this branch uses C++ lambdas and
// tbb::task_group, so the file must be compiled as C++ when
// TCI_USE_TBB_THREADS is set.
static void tci_distribute(unsigned n, unsigned idx, tci_comm* comm,
                           tci_range range, tci_range_func func, void* payload)
{
    if (n == 1)
    {
        func(comm, 0, range.size, payload);
        return;
    }

    // Split points are rounded to multiples of the grain size.
    range.grain = TCI_MAX(range.grain, 1);
    uint64_t ngrain = (range.size+range.grain-1)/range.grain;

    tbb::task_group tg;

    for (unsigned idx = 0;idx < n;idx++)
    {
        tg.run(
        [&,idx]
        {
            uint64_t first = (idx*ngrain)/n;
            uint64_t last = ((idx+1)*ngrain)/n;
            func(comm, first*range.grain, TCI_MIN(last*range.grain, range.size), payload);
        });
    }

    tg.wait();
}
// TBB variant of the 2-d split: spawns one task per (idx_m, idx_n) cell of
// the m x n worker grid chosen by tci_partition_2x2, then waits.  The
// caller's `idx` is unused in this branch.
static void tci_distribute_2d(unsigned num, unsigned idx, tci_comm* comm,
                              tci_range range_m, tci_range range_n,
                              tci_range_2d_func func, void* payload)
{
    if (num == 1)
    {
        func(comm, 0, range_m.size, 0, range_n.size, payload);
        return;
    }

    unsigned m, n;
    tci_partition_2x2(num, range_m.size, num, range_n.size, num, &m, &n);

    // Split points are rounded to multiples of each dimension's grain.
    range_m.grain = TCI_MAX(range_m.grain, 1);
    range_n.grain = TCI_MAX(range_n.grain, 1);

    uint64_t mgrain = (range_m.size+range_m.grain-1)/range_m.grain;
    uint64_t ngrain = (range_n.size+range_n.grain-1)/range_n.grain;

    tbb::task_group tg;

    for (unsigned idx_m = 0;idx_m < m;idx_m++)
    {
        for (unsigned idx_n = 0;idx_n < n;idx_n++)
        {
            tg.run(
            [&,idx_m,idx_n]
            {
                uint64_t mfirst = (idx_m*mgrain)/m;
                uint64_t nfirst = (idx_n*ngrain)/n;
                uint64_t mlast = ((idx_m+1)*mgrain)/m;
                uint64_t nlast = ((idx_n+1)*ngrain)/n;
                func(comm, mfirst*range_m.grain, TCI_MIN(mlast*range_m.grain, range_m.size),
                     nfirst*range_n.grain, TCI_MIN(nlast*range_n.grain, range_n.size), payload);
            });
        }
    }

    tg.wait();
}
#elif TCI_USE_OMPTASK_THREADS
// OpenMP-task variant: the caller spawns all n pieces as OpenMP tasks and
// the taskgroup waits for them; `idx` is explicitly unused.
static void tci_distribute(unsigned n, unsigned idx, tci_comm* comm,
                           tci_range range, tci_range_func func, void* payload)
{
    (void)idx;

    if (n == 1)
    {
        func(comm, 0, range.size, payload);
        return;
    }

    // Split points are rounded to multiples of the grain size.
    range.grain = TCI_MAX(range.grain, 1);
    uint64_t ngrain = (range.size+range.grain-1)/range.grain;

    #pragma omp taskgroup
    {
        for (uint64_t idx = 0;idx < n;idx++)
        {
            #pragma omp task
            {
                uint64_t first = (idx*ngrain)/n;
                uint64_t last = ((idx+1)*ngrain)/n;
                func(comm, first*range.grain, TCI_MIN(last*range.grain, range.size), payload);
            }
        }
    }
}
// OpenMP-task variant of the 2-d split: one task per cell of the m x n
// worker grid; the taskgroup joins them.  `idx` is explicitly unused.
static void tci_distribute_2d(unsigned num, unsigned idx, tci_comm* comm,
                              tci_range range_m, tci_range range_n,
                              tci_range_2d_func func, void* payload)
{
    (void)idx;

    if (num == 1)
    {
        func(comm, 0, range_m.size, 0, range_n.size, payload);
        return;
    }

    unsigned m, n;
    tci_partition_2x2(num, range_m.size, num, range_n.size, num, &m, &n);

    // Split points are rounded to multiples of each dimension's grain.
    range_m.grain = TCI_MAX(range_m.grain, 1);
    range_n.grain = TCI_MAX(range_n.grain, 1);

    uint64_t mgrain = (range_m.size+range_m.grain-1)/range_m.grain;
    uint64_t ngrain = (range_n.size+range_n.grain-1)/range_n.grain;

    #pragma omp taskgroup
    {
        for (uint64_t idx_m = 0;idx_m < m;idx_m++)
        {
            for (uint64_t idx_n = 0;idx_n < n;idx_n++)
            {
                #pragma omp task
                {
                    uint64_t mfirst = (idx_m*mgrain)/m;
                    uint64_t nfirst = (idx_n*ngrain)/n;
                    uint64_t mlast = ((idx_m+1)*mgrain)/m;
                    uint64_t nlast = ((idx_n+1)*ngrain)/n;
                    func(comm, mfirst*range_m.grain, TCI_MIN(mlast*range_m.grain, range_m.size),
                         nfirst*range_n.grain, TCI_MIN(nlast*range_n.grain, range_n.size), payload);
                }
            }
        }
    }
}
#elif TCI_USE_DISPATCH_THREADS
// Context passed through dispatch_apply_f to tci_distribute_func below.
typedef struct tci_distribute_func_data
{
    tci_comm* comm;
    tci_range_func func;    // user callback to run on each piece
    uint64_t n;             // number of pieces
    tci_range* range;       // range being split (grain already clamped >= 1)
    void* payload;
} tci_distribute_func_data;
// libdispatch worker: computes piece `idx` of the 1-d split (same
// grain-rounded arithmetic as the other backends) and invokes the callback.
static void tci_distribute_func(void* data_, size_t idx)
{
    tci_distribute_func_data* data = (tci_distribute_func_data*)data_;

    uint64_t ngrain = (data->range->size+data->range->grain-1)/data->range->grain;
    uint64_t first = (idx*ngrain)/data->n;
    uint64_t last = ((idx+1)*ngrain)/data->n;
    data->func(data->comm, first*data->range->grain,
               TCI_MIN(last*data->range->grain, data->range->size), data->payload);
}
// Context passed through dispatch_apply_f to tci_distribute_2d_func below.
typedef struct tci_distribute_2d_func_data
{
    tci_comm* comm;
    tci_range_2d_func func;      // user callback for each sub-rectangle
    uint64_t m, n;               // worker-grid shape (m x n pieces)
    tci_range *range_m, *range_n;
    void* payload;
} tci_distribute_2d_func_data;
// libdispatch worker: maps flat index `idx` onto the column-major m x n
// worker grid and runs the callback on that cell's sub-rectangle.
static void tci_distribute_2d_func(void* data_, size_t idx)
{
    tci_distribute_2d_func_data* data = (tci_distribute_2d_func_data*)data_;

    unsigned idx_m = idx % data->m;
    unsigned idx_n = idx / data->m;

    uint64_t mgrain = (data->range_m->size+data->range_m->grain-1)/data->range_m->grain;
    uint64_t ngrain = (data->range_n->size+data->range_n->grain-1)/data->range_n->grain;

    uint64_t mfirst = (idx_m*mgrain)/data->m;
    uint64_t nfirst = (idx_n*ngrain)/data->n;
    uint64_t mlast = ((idx_m+1)*mgrain)/data->m;
    uint64_t nlast = ((idx_n+1)*ngrain)/data->n;

    data->func(data->comm, mfirst*data->range_m->grain,
               TCI_MIN(mlast*data->range_m->grain, data->range_m->size),
               nfirst*data->range_n->grain,
               TCI_MIN(nlast*data->range_n->grain, data->range_n->size), data->payload);
}
// libdispatch variant: runs all n pieces via dispatch_apply_f on the
// default global queue (blocks until done); `idx` is unused.
static void tci_distribute(unsigned n, unsigned idx, tci_comm* comm,
                           tci_range range, tci_range_func func, void* payload)
{
    (void)idx;

    if (n == 1)
    {
        func(comm, 0, range.size, payload);
        return;
    }

    // Clamp the grain here; the worker assumes grain >= 1.
    range.grain = TCI_MAX(range.grain, 1);

    tci_distribute_func_data data = {comm, func, n, &range, payload};

    dispatch_queue_t queue =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

    dispatch_apply_f(n, queue, &data, tci_distribute_func);
}
// libdispatch variant of the 2-d split: one dispatch_apply_f iteration per
// cell of the m x n worker grid; `idx` is unused.
static void tci_distribute_2d(unsigned num, unsigned idx, tci_comm* comm,
                              tci_range range_m, tci_range range_n,
                              tci_range_2d_func func, void* payload)
{
    (void)idx;

    if (num == 1)
    {
        func(comm, 0, range_m.size, 0, range_n.size, payload);
        return;
    }

    unsigned m, n;
    tci_partition_2x2(num, range_m.size, num, range_n.size, num, &m, &n);

    // Clamp grains here; the worker assumes grain >= 1.
    range_m.grain = TCI_MAX(range_m.grain, 1);
    range_n.grain = TCI_MAX(range_n.grain, 1);

    tci_distribute_2d_func_data data = {comm, func, m, n, &range_m, &range_n, payload};

    dispatch_queue_t queue =
        dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

    dispatch_apply_f(m*n, queue, &data, tci_distribute_2d_func);
}
#elif TCI_USE_PPL_THREADS
// Microsoft PPL variant: concurrency::parallel_for over the n pieces;
// `idx` is unused.  NOTE(review): uses a C++ lambda, so this branch is
// presumably compiled as C++ when TCI_USE_PPL_THREADS is set.
static void tci_distribute(unsigned n, unsigned idx, tci_comm* comm,
                           tci_range range, tci_range_func func, void* payload)
{
    (void)idx;

    if (n == 1)
    {
        func(comm, 0, range.size, payload);
        return;
    }

    // Split points are rounded to multiples of the grain size.
    range.grain = TCI_MAX(range.grain, 1);
    uint64_t ngrain = (range.size+range.grain-1)/range.grain;

    concurrency::parallel_for(uint64_t(), n,
    [&](uint64_t idx)
    {
        uint64_t first = (idx*ngrain)/n;
        uint64_t last = ((idx+1)*ngrain)/n;
        func(comm, first*range.grain,
             TCI_MIN(last*range.grain, range.size), payload);
    });
}
// PPL variant of the 2-d split: parallel_for over the m*n grid cells,
// decoding each flat index column-major (idx_m = idx % m); `idx` unused.
static void tci_distribute_2d(unsigned num, unsigned idx, tci_comm* comm,
                              tci_range range_m, tci_range range_n,
                              tci_range_2d_func func, void* payload)
{
    (void)idx;

    if (num == 1)
    {
        func(comm, 0, range_m.size, 0, range_n.size, payload);
        return;
    }

    unsigned m, n;
    tci_partition_2x2(num, range_m.size, num, range_n.size, num, &m, &n);

    // Split points are rounded to multiples of each dimension's grain.
    range_m.grain = TCI_MAX(range_m.grain, 1);
    range_n.grain = TCI_MAX(range_n.grain, 1);

    uint64_t mgrain = (range_m.size+range_m.grain-1)/range_m.grain;
    uint64_t ngrain = (range_n.size+range_n.grain-1)/range_n.grain;

    concurrency::parallel_for(uint64_t(), m*n,
    [&](uint64_t idx)
    {
        unsigned idx_m = idx % m;
        unsigned idx_n = idx / m;

        uint64_t mfirst = (idx_m*mgrain)/m;
        uint64_t nfirst = (idx_n*ngrain)/n;
        uint64_t mlast = ((idx_m+1)*mgrain)/m;
        uint64_t nlast = ((idx_n+1)*ngrain)/n;

        func(comm, mfirst*range_m.grain,
             TCI_MIN(mlast*range_m.grain, range_m.size),
             nfirst*range_n.grain,
             TCI_MIN(nlast*range_n.grain, range_n.size), payload);
    });
}
#else // single threaded
// Single-threaded fallback: the sole worker gets the whole range.
static void tci_distribute(unsigned n, unsigned idx, tci_comm* comm,
                           tci_range range, tci_range_func func, void* payload)
{
    (void)n;
    (void)idx;
    func(comm, 0, range.size, payload);
}
// Single-threaded fallback: the sole worker gets the whole 2-d range.
static void tci_distribute_2d(unsigned n, unsigned idx, tci_comm* comm,
                              tci_range range_m, tci_range range_n,
                              tci_range_2d_func func, void* payload)
{
    (void)n;
    (void)idx;
    func(comm, 0, range_m.size, 0, range_n.size, payload);
}
#endif
// Split `range` across the gangs of `comm`; each gang's share is handled
// with the gang's own communicator.
void tci_comm_distribute_over_gangs(tci_comm* comm, tci_range range,
                                    tci_range_func func, void* payload)
{
    tci_distribute(comm->ngang, comm->gid, comm, range, func, payload);
}
// Split `range` across the threads of `comm`; the callback runs with the
// serial communicator tci_single, since each thread works alone.
void tci_comm_distribute_over_threads(tci_comm* comm, tci_range range,
                                      tci_range_func func, void* payload)
{
    tci_distribute(comm->nthread, comm->tid, tci_single, range, func, payload);
}
// 2-d analogue of tci_comm_distribute_over_gangs.
void tci_comm_distribute_over_gangs_2d(tci_comm* comm, tci_range range_m,
                                       tci_range range_n,
                                       tci_range_2d_func func, void* payload)
{
    tci_distribute_2d(comm->ngang, comm->gid, comm, range_m, range_n,
                      func, payload);
}
// 2-d analogue of tci_comm_distribute_over_threads (callback again sees
// the serial communicator tci_single).
void tci_comm_distribute_over_threads_2d(tci_comm* comm, tci_range range_m,
                                         tci_range range_n,
                                         tci_range_2d_func func, void* payload)
{
    tci_distribute_2d(comm->nthread, comm->tid, tci_single, range_m, range_n,
                      func, payload);
}
|
GB_binop__bor_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint16)
// A*D function (colscale): GB (_AxD__bor_uint16)
// D*A function (rowscale): GB (_DxB__bor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint16)
// C=scalar+B GB (_bind1st__bor_uint16)
// C=scalar+B' GB (_bind1st_tran__bor_uint16)
// C=A+scalar GB (_bind2nd__bor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bor_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT16 || GxB_NO_BOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A|B where C, A, and B are all dense; the loop body is supplied by
// the included template, specialized via the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of OpenMP threads to use
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C |= B: accumulate a sparse/hypersparse B into a dense C, using the
// task slicing of B prepared by the caller.
GrB_Info GB (_Cdense_accumB__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // loop body supplied by the template, specialized via GB_BINOP etc.
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C |= b: accumulate a single scalar into every entry of a dense C.
GrB_Info GB (_Cdense_accumb__bor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b (as uint16_t)
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above already returned (generated code)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes the results directly into C->x
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template writes the results directly into C->x
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (union of patterns), optionally masked; also handles
// eWiseUnion, in which case the alpha/beta scalars replace missing entries.
GrB_Info GB (_AaddB__bor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,     // used only when is_eWiseUnion
    const GB_void *beta_scalar_in,      // used only when is_eWiseUnion
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here is freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B (intersection of patterns), optionally
// masked, where C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__bor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hypersparse and B
// is bitmap/full.  flipxy is only relevant when GB_BINOP_FLIP is set
// (non-commutative op with no flipped variant); for BOR it is 0.
GrB_Info GB (_AemultB_02__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hypersparse and
// both A and B are bitmap/full.
GrB_Info GB (_AemultB_04__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, optionally masked, where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__bor_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = x | Bx: apply the operator with the scalar bound as first argument.
GrB_Info GB (_bind1st__bor_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) | (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = Ax | y: apply the operator with the scalar bound as second argument.
GrB_Info GB (_bind2nd__bor_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) | (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
// C = x | A': transpose A and apply the operator with x bound first,
// using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__bor_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the rest of this file (generated code)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the shared transpose template included below;
// here it computes Cx [pC] = aij | y, with the scalar y as the 2nd operand.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // bind the scalar once, then run the shared transpose template, which
    // applies GB_CAST_OP (defined above) to every entry of A
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main.c | // By izanbf1803 - http://izanbf.es/
#define TITLE "Conway's game of life - izanbf.es"
#include <stdio.h>
#include <string.h>
#include <SDL2/SDL.h>
#include "screen.h"
/* Advance the automaton by one generation (standard Conway rules: a live
 * cell survives with 2 or 3 live neighbours, a dead cell is born with
 * exactly 3).  pixels_next serves as a scratch buffer so every cell is
 * evaluated against the same generation; the result is then copied back
 * into pixels, which draw() reads. */
void update(screen_t* screen)
{
    #pragma omp parallel for
    for (int x = 0; x < screen->W; x++) {
        for (int y = 0; y < screen->H; y++) {
            int n = 0; // Number of live neighbours
            // Scan the 3x3 neighbourhood directly instead of materializing
            // an 8x2 coordinate table on every iteration of this hot loop;
            // skip the cell itself and anything outside the grid.
            for (int dx = -1; dx <= 1; dx++) {
                for (int dy = -1; dy <= 1; dy++) {
                    if (dx == 0 && dy == 0) continue;
                    int nx = x + dx;
                    int ny = y + dy;
                    if (nx >= 0 && nx < screen->W
                        && ny >= 0 && ny < screen->H
                        && screen->pixels[nx][ny] == 1)
                    {
                        n++;
                    }
                }
            }
            if (screen->pixels[x][y]) {
                screen->pixels_next[x][y] = (n == 2) || (n == 3);
            }
            else {
                screen->pixels_next[x][y] = (n == 3);
            }
        }
    }
    #pragma omp parallel for
    for (int x = 0; x < screen->W; x++) { // Commit next generation (used on draw())
        memcpy(screen->pixels[x], screen->pixels_next[x], screen->H * sizeof(unsigned char));
    }
}
/* Render every live cell as a filled point_size x point_size square,
 * using whatever draw color is currently set on the renderer. */
void draw(screen_t* screen, SDL_Renderer* renderer)
{
    const int ps = screen->point_size;
    for (int cx = 0; cx < screen->W; cx++) {
        for (int cy = 0; cy < screen->H; cy++) {
            if (!screen->pixels[cx][cy]) {
                continue; // dead cell: nothing to paint
            }
            SDL_Rect cell;
            cell.x = cx * ps;
            cell.y = cy * ps;
            cell.w = ps;
            cell.h = ps;
            SDL_RenderFillRect(renderer, &cell);
        }
    }
}
int main(int argc, char** argv) // Inizialize all values
{
SDL_Init(SDL_INIT_VIDEO);
screen_t* screen = init_game(argc, argv);
SDL_Window* win = SDL_CreateWindow(TITLE, (screen->info.w >> 1)-(screen->W >> 1), (screen->info.h >> 1)-(screen->H >> 1),
screen->W * screen->point_size, screen->H * screen->point_size, SDL_WINDOW_SHOWN);
SDL_Renderer* renderer = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED);
SDL_Event ev;
if (argc < 3 || (argv[1][0] == '.' && argv[2][0] == '.'))
SDL_SetWindowFullscreen(win, SDL_WINDOW_FULLSCREEN_DESKTOP);
unsigned char quit = 0, pause = 0, mouseDownLeft = 0, mouseDownRight = 0;
while (!quit) {
while (SDL_PollEvent(&ev)) {
switch (ev.type) {
case SDL_QUIT:
quit = 1;
break;
case SDL_KEYDOWN:
switch (ev.key.keysym.sym) {
case SDLK_ESCAPE:
case SDLK_q:
quit = 1;
break;
case SDLK_p:
pause = !pause;
break;
case SDLK_DOWN:
screen->delay <<= 1;
if (screen->delay > 500) screen->delay = 500;
break;
case SDLK_UP:
screen->delay >>= 1;
if (screen->delay < 1) screen->delay = 1;
break;
}
break;
case SDL_MOUSEBUTTONDOWN:
if (ev.button.button == SDL_BUTTON_LEFT) mouseDownLeft = 1;
if (ev.button.button == SDL_BUTTON_RIGHT) mouseDownRight = 1;
break;
case SDL_MOUSEBUTTONUP:
if (ev.button.button == SDL_BUTTON_LEFT) mouseDownLeft = 0;
if (ev.button.button == SDL_BUTTON_RIGHT) mouseDownRight = 0;
break;
case SDL_MOUSEMOTION: {
int x_ = ev.button.x / screen->point_size;
int y_ = ev.button.y / screen->point_size;
if (x_ < screen->W && y_ < screen->H) {
if (mouseDownLeft) {
screen->pixels[x_][y_] = 1;
}
else if (mouseDownRight) {
screen->pixels[x_][y_] = 0;
}
}
break;
}
}
}
SDL_RenderClear(renderer);
SDL_SetRenderDrawColor(renderer, 255, 255, 255, 1); // Set color = white
if (!pause) update(screen);
draw(screen, renderer);
SDL_SetRenderDrawColor(renderer, 0, 0, 0, 1); // Reset color to black
SDL_RenderPresent(renderer);
if (pause) {
SDL_Delay(1);
}
else {
SDL_Delay(screen->delay);
}
}
end_game(screen); // Free all screen allocations
SDL_DestroyWindow(win);
SDL_Quit();
return 0;
} |
kmp_sch_simd_guided.c | // RUN: %libomp-compile-and-run
/*
Test for the 'schedule(simd:guided)' clause.
Compiler needs to generate a dynamic dispatching and pass the schedule
value 46 to the OpenMP RTL. Test uses numerous loop parameter combinations.
*/
#include <stdio.h>
#include <omp.h>
#if defined(WIN32) || defined(_WIN32)
#include <windows.h>
#define delay() Sleep(1);
#else
#include <unistd.h>
#define delay() usleep(10);
#endif
// uncomment for debug diagnostics:
//#define DEBUG
#define SIMD_LEN 4
// ---------------------------------------------------------------------------
// Various definitions copied from OpenMP RTL
// Schedule kinds understood by the dispatcher; the numeric values must
// match the RTL's internal sched_type enumeration (46 is the value this
// test exists to exercise).
enum sched {
  kmp_sch_static_balanced_chunked = 45,
  kmp_sch_guided_simd = 46,
  kmp_sch_runtime_simd = 47,
};
// Shorthand integer types matching the RTL's naming conventions.
typedef unsigned u32;
typedef long long i64;
typedef unsigned long long u64;
// Source-location record passed to every __kmpc_* entry point
// (mirrors the RTL's ident_t; field semantics are internal to the RTL).
typedef struct {
  int reserved_1;
  int flags;
  int reserved_2;
  int reserved_3;
  char *psource;
} id;
// Dispatcher entry points: the _4 variants take 32-bit loop bounds,
// the _8 variants 64-bit.
extern int __kmpc_global_thread_num(id*);
extern void __kmpc_barrier(id*, int gtid);
extern void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int);
extern void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64);
extern int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*);
extern int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*);
// End of definitions copied from OpenMP RTL.
// ---------------------------------------------------------------------------
// Shared location record handed to every RTL call above.
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
// ---------------------------------------------------------------------------
// Validate 'schedule(simd:guided)' dispatch for 64-bit loop bounds.
// The master thread (tid 0) consumes every chunk itself and checks the
// bounds/stride/size invariants of each; worker threads wait until the
// master is done and then verify the dispatcher has nothing left for them.
// Returns the number of errors detected.
int run_loop_64(i64 loop_lb, i64 loop_ub, i64 loop_st, int loop_chunk) {
  int err = 0;
  static int volatile loop_sync = 0;
  i64 lb; // Chunk lower bound
  i64 ub; // Chunk upper bound
  i64 st; // Chunk stride
  int rc;
  int tid = omp_get_thread_num();
  int gtid = tid;
  int last;
#ifdef DEBUG
  // Fix: use #ifdef, not "#if DEBUG" -- DEBUG is defined as an *empty*
  // macro at the top of this file, and an empty token sequence is not a
  // valid #if expression, so enabling debugging broke the build.
  // Also fix the format string: it had 5 specifiers for 7 arguments.
  printf("run_loop_<%d>(gtid=%d, tid=%d, lb=%d, ub=%d, st=%d, ch=%d)\n",
         (int)sizeof(i64), gtid, tid,
         (int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk);
#endif
  // Don't test degenerate cases that should have been discovered by codegen
  if (loop_st == 0)
    return 0;
  if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
    return 0;
  __kmpc_dispatch_init_8(&loc, gtid, kmp_sch_guided_simd,
                         loop_lb, loop_ub, loop_st, loop_chunk);
  if (tid == 0) {
    // Let the master thread handle the chunks alone
    int chunk;   // No of current chunk
    i64 next_lb; // Lower bound of the next chunk
    i64 last_ub = loop_ub; // Upper bound of the last processed chunk.
    // last_ub is initialized so the final-bound checks below are well
    // defined even if the dispatcher returns no chunk at all (that
    // failure is reported separately via the chunk > 0 check).
    u64 cur; // Number of iterations in current chunk
    u64 max; // Max allowed iterations for current chunk
    int undersized = 0;
    chunk = 0;
    next_lb = loop_lb;
    max = (loop_ub - loop_lb) / loop_st + 1;
    // The first chunk can consume all iterations
    while (__kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st)) {
      ++ chunk;
#ifdef DEBUG
      printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub);
#endif
      // Only the final chunk may be undersized; if the previous chunk was
      // undersized and another one arrived, that is an error.
      if (undersized) {
        printf("Error with chunk %d\n", chunk);
        err++;
      }
      // Check lower and upper bounds
      if (lb != next_lb) {
        printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk);
        err++;
      }
      if (loop_st > 0) {
        if (!(ub <= loop_ub)) {
          printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk);
          err++;
        }
        if (!(lb <= ub)) {
          printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
          err++;
        }
      } else {
        if (!(ub >= loop_ub)) {
          printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk);
          err++;
        }
        if (!(lb >= ub)) {
          printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
          err++;
        }
      }
      // Stride should not change
      if (!(st == loop_st)) {
        printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk);
        err++;
      }
      cur = (ub - lb) / loop_st + 1;
      // Guided scheduling uses FP computations, so current chunk may
      // be a bit bigger (+1) than allowed maximum
      if (!(cur <= max + 1)) {
        printf("Error with iter %llu, %llu\n", cur, max);
        err++;
      }
      // Update maximum for the next chunk
      if (cur < max)
        max = cur;
      next_lb = ub + loop_st;
      last_ub = ub;
      // Cast avoids a signed/unsigned comparison; loop_chunk > 0 here.
      undersized = (cur < (u64)loop_chunk);
    }
    // Must have at least one chunk
    if (!(chunk > 0)) {
      printf("Error with chunk %d\n", chunk);
      err++;
    }
    // Must have the right last iteration index
    if (loop_st > 0) {
      if (!(last_ub <= loop_ub)) {
        printf("Error with last1 %d, %d, ch %d\n",
               (int)last_ub, (int)loop_ub, chunk);
        err++;
      }
      if (!(last_ub + loop_st > loop_ub)) {
        printf("Error with last2 %d, %d, %d, ch %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk);
        err++;
      }
    } else {
      if (!(last_ub >= loop_ub)) {
        printf("Error with last1 %d, %d, ch %d\n",
               (int)last_ub, (int)loop_ub, chunk);
        err++;
      }
      if (!(last_ub + loop_st < loop_ub)) {
        printf("Error with last2 %d, %d, %d, ch %d\n",
               (int)last_ub, (int)loop_st, (int)loop_ub, chunk);
        err++;
      }
    }
    // Let non-master threads go
    loop_sync = 1;
  } else {
    int i;
    // Workers wait for master thread to finish, then call __kmpc_dispatch_next
    for (i = 0; i < 1000000; ++ i) {
      if (loop_sync != 0) {
        break;
      }
    }
    while (loop_sync == 0) {
      delay();
    }
    // At this moment we do not have any more chunks -- all the chunks already
    // processed by master thread
    rc = __kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st);
    if (rc) {
      printf("Error return value\n");
      err++;
    }
  }
  __kmpc_barrier(&loc, gtid);
  if (tid == 0) {
    loop_sync = 0; // Restore original state
#ifdef DEBUG
    printf("run_loop_64(): at the end\n");
#endif
  }
  __kmpc_barrier(&loc, gtid);
  return err;
} // run_loop
// ---------------------------------------------------------------------------
int run_loop_32(int loop_lb, int loop_ub, int loop_st, int loop_chunk) {
int err = 0;
static int volatile loop_sync = 0;
int lb; // Chunk lower bound
int ub; // Chunk upper bound
int st; // Chunk stride
int rc;
int tid = omp_get_thread_num();
int gtid = tid;
int last;
#if DEBUG
printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n",
(int)sizeof(int), gtid, tid,
(int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk);
#endif
// Don't test degenerate cases that should have been discovered by codegen
if (loop_st == 0)
return 0;
if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub)
return 0;
__kmpc_dispatch_init_4(&loc, gtid, kmp_sch_guided_simd,
loop_lb, loop_ub, loop_st, loop_chunk);
if (tid == 0) {
// Let the master thread handle the chunks alone
int chunk; // No of current chunk
int next_lb; // Lower bound of the next chunk
int last_ub; // Upper bound of the last processed chunk
u64 cur; // Number of interations in current chunk
u64 max; // Max allowed iterations for current chunk
int undersized = 0;
chunk = 0;
next_lb = loop_lb;
max = (loop_ub - loop_lb) / loop_st + 1;
// The first chunk can consume all iterations
while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
++ chunk;
#if DEBUG
printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub);
#endif
// Check if previous chunk (it is not the final chunk) is undersized
if (undersized) {
printf("Error with chunk %d\n", chunk);
err++;
}
// Check lower and upper bounds
if (lb != next_lb) {
printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk);
err++;
}
if (loop_st > 0) {
if (!(ub <= loop_ub)) {
printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk);
err++;
}
if (!(lb <= ub)) {
printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
err++;
}
} else {
if (!(ub >= loop_ub)) {
printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk);
err++;
}
if (!(lb >= ub)) {
printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk);
err++;
}
}; // if
// Stride should not change
if (!(st == loop_st)) {
printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk);
err++;
}
cur = (ub - lb) / loop_st + 1;
// Guided scheduling uses FP computations, so current chunk may
// be a bit bigger (+1) than allowed maximum
if (!(cur <= max + 1)) {
printf("Error with iter %llu, %llu\n", cur, max);
err++;
}
// Update maximum for the next chunk
if (cur < max)
max = cur;
next_lb = ub + loop_st;
last_ub = ub;
undersized = (cur < loop_chunk);
}; // while
// Must have at least one chunk
if (!(chunk > 0)) {
printf("Error with chunk %d\n", chunk);
err++;
}
// Must have the right last iteration index
if (loop_st > 0) {
if (!(last_ub <= loop_ub)) {
printf("Error with last1 %d, %d, ch %d\n",
(int)last_ub, (int)loop_ub, chunk);
err++;
}
if (!(last_ub + loop_st > loop_ub)) {
printf("Error with last2 %d, %d, %d, ch %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk);
err++;
}
} else {
if (!(last_ub >= loop_ub)) {
printf("Error with last1 %d, %d, ch %d\n",
(int)last_ub, (int)loop_ub, chunk);
err++;
}
if (!(last_ub + loop_st < loop_ub)) {
printf("Error with last2 %d, %d, %d, ch %d\n",
(int)last_ub, (int)loop_st, (int)loop_ub, chunk);
err++;
}
}; // if
// Let non-master threads go
loop_sync = 1;
} else {
int i;
// Workers wait for master thread to finish, then call __kmpc_dispatch_next
for (i = 0; i < 1000000; ++ i) {
if (loop_sync != 0) {
break;
}; // if
}; // for i
while (loop_sync == 0) {
delay();
}; // while
// At this moment we do not have any more chunks -- all the chunks already
// processed by the master thread
rc = __kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st);
if (rc) {
printf("Error return value\n");
err++;
}
}; // if
__kmpc_barrier(&loc, gtid);
if (tid == 0) {
loop_sync = 0; // Restore original state
#if DEBUG
printf("run_loop<>(): at the end\n");
#endif
}; // if
__kmpc_barrier(&loc, gtid);
return err;
} // run_loop
// ---------------------------------------------------------------------------
// Run run_loop_64 over many (chunk, stride, lb, ub) combinations, in both
// directions, with num_th threads.  Returns the accumulated error count.
int run_64(int num_th)
{
  int err = 0;
  // Fix: err is updated by every thread in the parallel region, which was
  // a data race; accumulate the per-thread counts with a reduction.
  #pragma omp parallel num_threads(num_th) reduction(+: err)
  {
    int chunk;
    i64 st, lb, ub;
    for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) {
      for (st = 1; st <= 3; ++ st) {
        for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) {
          for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) {
            err += run_loop_64(lb, ub, st, chunk);   // ascending loop
            err += run_loop_64(ub, lb, -st, chunk);  // descending loop
          }
        }
      }
    }
  }
  return err;
} // run_all
// Run run_loop_32 over many (chunk, stride, lb, ub) combinations, in both
// directions, with num_th threads.  Returns the accumulated error count.
int run_32(int num_th)
{
  int err = 0;
  // Fix: err is updated by every thread in the parallel region, which was
  // a data race; accumulate the per-thread counts with a reduction.
  #pragma omp parallel num_threads(num_th) reduction(+: err)
  {
    int chunk, st, lb, ub;
    for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) {
      for (st = 1; st <= 3; ++ st) {
        for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) {
          for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) {
            err += run_loop_32(lb, ub, st, chunk);   // ascending loop
            err += run_loop_32(ub, lb, -st, chunk);  // descending loop
          }
        }
      }
    }
  }
  return err;
} // run_all
// ---------------------------------------------------------------------------
// Exercise both bit widths with 1..4 threads and report pass/fail.
// The exit status is the total error count (0 on success).
int main()
{
  int total = 0;
  for (int nth = 1; nth <= 4; ++ nth) {
    total += run_32(nth);
    total += run_64(nth);
  }
  if (total != 0) {
    printf("failed with %d errors\n", total);
  } else {
    printf("passed\n");
  }
  return total;
}
|
convolution_3x3_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps(bias + p * 8) : _mm256_set1_ps(0.f);
out.fill(_bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* kptr = kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 1 < outw; j += 2)
{
__m256 _sum00 = _mm256_loadu_ps(outptr);
__m256 _sum01 = _mm256_setzero_ps();
__m256 _sum10 = _mm256_loadu_ps(outptr + 8);
__m256 _sum11 = _mm256_setzero_ps();
__m256 _r000 = _mm256_broadcast_ss(r0 + 0);
__m256 _r001 = _mm256_broadcast_ss(r0 + 1);
__m256 _r002 = _mm256_broadcast_ss(r0 + 2);
__m256 _r003 = _mm256_broadcast_ss(r0 + 3);
__m256 _r004 = _mm256_broadcast_ss(r0 + 4);
__m256 _r005 = _mm256_broadcast_ss(r0 + 5);
__m256 _r006 = _mm256_broadcast_ss(r0 + 6);
__m256 _r007 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = _mm256_loadu_ps(kptr);
__m256 _k01 = _mm256_loadu_ps(kptr + 8);
__m256 _k02 = _mm256_loadu_ps(kptr + 16);
__m256 _k03 = _mm256_loadu_ps(kptr + 24);
__m256 _k04 = _mm256_loadu_ps(kptr + 32);
__m256 _k05 = _mm256_loadu_ps(kptr + 40);
__m256 _k06 = _mm256_loadu_ps(kptr + 48);
__m256 _k07 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r000, _k00, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r001, _k01, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r002, _k02, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r003, _k03, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r004, _k04, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r005, _k05, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r006, _k06, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r007, _k07, _sum01);
__m256 _r010 = _mm256_broadcast_ss(r0 + 8);
__m256 _r011 = _mm256_broadcast_ss(r0 + 9);
__m256 _r012 = _mm256_broadcast_ss(r0 + 10);
__m256 _r013 = _mm256_broadcast_ss(r0 + 11);
__m256 _r014 = _mm256_broadcast_ss(r0 + 12);
__m256 _r015 = _mm256_broadcast_ss(r0 + 13);
__m256 _r016 = _mm256_broadcast_ss(r0 + 14);
__m256 _r017 = _mm256_broadcast_ss(r0 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r010, _k00, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r011, _k01, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r012, _k02, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r013, _k03, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r014, _k04, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r015, _k05, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r016, _k06, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r017, _k07, _sum11);
__m256 _k10 = _mm256_loadu_ps(kptr);
__m256 _k11 = _mm256_loadu_ps(kptr + 8);
__m256 _k12 = _mm256_loadu_ps(kptr + 16);
__m256 _k13 = _mm256_loadu_ps(kptr + 24);
__m256 _k14 = _mm256_loadu_ps(kptr + 32);
__m256 _k15 = _mm256_loadu_ps(kptr + 40);
__m256 _k16 = _mm256_loadu_ps(kptr + 48);
__m256 _k17 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r010, _k10, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r011, _k11, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r012, _k12, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r013, _k13, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r014, _k14, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r015, _k15, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r016, _k16, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r017, _k17, _sum01);
__m256 _r020 = _mm256_broadcast_ss(r0 + 16);
__m256 _r021 = _mm256_broadcast_ss(r0 + 17);
__m256 _r022 = _mm256_broadcast_ss(r0 + 18);
__m256 _r023 = _mm256_broadcast_ss(r0 + 19);
__m256 _r024 = _mm256_broadcast_ss(r0 + 20);
__m256 _r025 = _mm256_broadcast_ss(r0 + 21);
__m256 _r026 = _mm256_broadcast_ss(r0 + 22);
__m256 _r027 = _mm256_broadcast_ss(r0 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r020, _k10, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r021, _k11, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r022, _k12, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r023, _k13, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r024, _k14, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r025, _k15, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r026, _k16, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r027, _k17, _sum11);
__m256 _k20 = _mm256_loadu_ps(kptr);
__m256 _k21 = _mm256_loadu_ps(kptr + 8);
__m256 _k22 = _mm256_loadu_ps(kptr + 16);
__m256 _k23 = _mm256_loadu_ps(kptr + 24);
__m256 _k24 = _mm256_loadu_ps(kptr + 32);
__m256 _k25 = _mm256_loadu_ps(kptr + 40);
__m256 _k26 = _mm256_loadu_ps(kptr + 48);
__m256 _k27 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r020, _k20, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r021, _k21, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r022, _k22, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r023, _k23, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r024, _k24, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r025, _k25, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r026, _k26, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r027, _k27, _sum01);
__m256 _r030 = _mm256_broadcast_ss(r0 + 24);
__m256 _r031 = _mm256_broadcast_ss(r0 + 25);
__m256 _r032 = _mm256_broadcast_ss(r0 + 26);
__m256 _r033 = _mm256_broadcast_ss(r0 + 27);
__m256 _r034 = _mm256_broadcast_ss(r0 + 28);
__m256 _r035 = _mm256_broadcast_ss(r0 + 29);
__m256 _r036 = _mm256_broadcast_ss(r0 + 30);
__m256 _r037 = _mm256_broadcast_ss(r0 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r030, _k20, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r031, _k21, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r032, _k22, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r033, _k23, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r034, _k24, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r035, _k25, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r036, _k26, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r037, _k27, _sum11);
__m256 _r100 = _mm256_broadcast_ss(r1 + 0);
__m256 _r101 = _mm256_broadcast_ss(r1 + 1);
__m256 _r102 = _mm256_broadcast_ss(r1 + 2);
__m256 _r103 = _mm256_broadcast_ss(r1 + 3);
__m256 _r104 = _mm256_broadcast_ss(r1 + 4);
__m256 _r105 = _mm256_broadcast_ss(r1 + 5);
__m256 _r106 = _mm256_broadcast_ss(r1 + 6);
__m256 _r107 = _mm256_broadcast_ss(r1 + 7);
__m256 _k30 = _mm256_loadu_ps(kptr);
__m256 _k31 = _mm256_loadu_ps(kptr + 8);
__m256 _k32 = _mm256_loadu_ps(kptr + 16);
__m256 _k33 = _mm256_loadu_ps(kptr + 24);
__m256 _k34 = _mm256_loadu_ps(kptr + 32);
__m256 _k35 = _mm256_loadu_ps(kptr + 40);
__m256 _k36 = _mm256_loadu_ps(kptr + 48);
__m256 _k37 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r100, _k30, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r101, _k31, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r102, _k32, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r103, _k33, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r104, _k34, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r105, _k35, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r106, _k36, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r107, _k37, _sum01);
__m256 _r110 = _mm256_broadcast_ss(r1 + 8);
__m256 _r111 = _mm256_broadcast_ss(r1 + 9);
__m256 _r112 = _mm256_broadcast_ss(r1 + 10);
__m256 _r113 = _mm256_broadcast_ss(r1 + 11);
__m256 _r114 = _mm256_broadcast_ss(r1 + 12);
__m256 _r115 = _mm256_broadcast_ss(r1 + 13);
__m256 _r116 = _mm256_broadcast_ss(r1 + 14);
__m256 _r117 = _mm256_broadcast_ss(r1 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r110, _k30, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r111, _k31, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r112, _k32, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r113, _k33, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r114, _k34, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r115, _k35, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r116, _k36, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r117, _k37, _sum11);
__m256 _k40 = _mm256_loadu_ps(kptr);
__m256 _k41 = _mm256_loadu_ps(kptr + 8);
__m256 _k42 = _mm256_loadu_ps(kptr + 16);
__m256 _k43 = _mm256_loadu_ps(kptr + 24);
__m256 _k44 = _mm256_loadu_ps(kptr + 32);
__m256 _k45 = _mm256_loadu_ps(kptr + 40);
__m256 _k46 = _mm256_loadu_ps(kptr + 48);
__m256 _k47 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r110, _k40, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r111, _k41, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r112, _k42, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r113, _k43, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r114, _k44, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r115, _k45, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r116, _k46, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r117, _k47, _sum01);
__m256 _r120 = _mm256_broadcast_ss(r1 + 16);
__m256 _r121 = _mm256_broadcast_ss(r1 + 17);
__m256 _r122 = _mm256_broadcast_ss(r1 + 18);
__m256 _r123 = _mm256_broadcast_ss(r1 + 19);
__m256 _r124 = _mm256_broadcast_ss(r1 + 20);
__m256 _r125 = _mm256_broadcast_ss(r1 + 21);
__m256 _r126 = _mm256_broadcast_ss(r1 + 22);
__m256 _r127 = _mm256_broadcast_ss(r1 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r120, _k40, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r121, _k41, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r122, _k42, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r123, _k43, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r124, _k44, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r125, _k45, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r126, _k46, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r127, _k47, _sum11);
__m256 _k50 = _mm256_loadu_ps(kptr);
__m256 _k51 = _mm256_loadu_ps(kptr + 8);
__m256 _k52 = _mm256_loadu_ps(kptr + 16);
__m256 _k53 = _mm256_loadu_ps(kptr + 24);
__m256 _k54 = _mm256_loadu_ps(kptr + 32);
__m256 _k55 = _mm256_loadu_ps(kptr + 40);
__m256 _k56 = _mm256_loadu_ps(kptr + 48);
__m256 _k57 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r120, _k50, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r121, _k51, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r122, _k52, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r123, _k53, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r124, _k54, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r125, _k55, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r126, _k56, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r127, _k57, _sum01);
__m256 _r130 = _mm256_broadcast_ss(r1 + 24);
__m256 _r131 = _mm256_broadcast_ss(r1 + 25);
__m256 _r132 = _mm256_broadcast_ss(r1 + 26);
__m256 _r133 = _mm256_broadcast_ss(r1 + 27);
__m256 _r134 = _mm256_broadcast_ss(r1 + 28);
__m256 _r135 = _mm256_broadcast_ss(r1 + 29);
__m256 _r136 = _mm256_broadcast_ss(r1 + 30);
__m256 _r137 = _mm256_broadcast_ss(r1 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r130, _k50, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r131, _k51, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r132, _k52, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r133, _k53, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r134, _k54, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r135, _k55, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r136, _k56, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r137, _k57, _sum11);
__m256 _r200 = _mm256_broadcast_ss(r2 + 0);
__m256 _r201 = _mm256_broadcast_ss(r2 + 1);
__m256 _r202 = _mm256_broadcast_ss(r2 + 2);
__m256 _r203 = _mm256_broadcast_ss(r2 + 3);
__m256 _r204 = _mm256_broadcast_ss(r2 + 4);
__m256 _r205 = _mm256_broadcast_ss(r2 + 5);
__m256 _r206 = _mm256_broadcast_ss(r2 + 6);
__m256 _r207 = _mm256_broadcast_ss(r2 + 7);
__m256 _k60 = _mm256_loadu_ps(kptr);
__m256 _k61 = _mm256_loadu_ps(kptr + 8);
__m256 _k62 = _mm256_loadu_ps(kptr + 16);
__m256 _k63 = _mm256_loadu_ps(kptr + 24);
__m256 _k64 = _mm256_loadu_ps(kptr + 32);
__m256 _k65 = _mm256_loadu_ps(kptr + 40);
__m256 _k66 = _mm256_loadu_ps(kptr + 48);
__m256 _k67 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r200, _k60, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r201, _k61, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r202, _k62, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r203, _k63, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r204, _k64, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r205, _k65, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r206, _k66, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r207, _k67, _sum01);
__m256 _r210 = _mm256_broadcast_ss(r2 + 8);
__m256 _r211 = _mm256_broadcast_ss(r2 + 9);
__m256 _r212 = _mm256_broadcast_ss(r2 + 10);
__m256 _r213 = _mm256_broadcast_ss(r2 + 11);
__m256 _r214 = _mm256_broadcast_ss(r2 + 12);
__m256 _r215 = _mm256_broadcast_ss(r2 + 13);
__m256 _r216 = _mm256_broadcast_ss(r2 + 14);
__m256 _r217 = _mm256_broadcast_ss(r2 + 15);
_sum10 = _mm256_comp_fmadd_ps(_r210, _k60, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r211, _k61, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r212, _k62, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r213, _k63, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r214, _k64, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r215, _k65, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r216, _k66, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r217, _k67, _sum11);
__m256 _k70 = _mm256_loadu_ps(kptr);
__m256 _k71 = _mm256_loadu_ps(kptr + 8);
__m256 _k72 = _mm256_loadu_ps(kptr + 16);
__m256 _k73 = _mm256_loadu_ps(kptr + 24);
__m256 _k74 = _mm256_loadu_ps(kptr + 32);
__m256 _k75 = _mm256_loadu_ps(kptr + 40);
__m256 _k76 = _mm256_loadu_ps(kptr + 48);
__m256 _k77 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum00 = _mm256_comp_fmadd_ps(_r210, _k70, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r211, _k71, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r212, _k72, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r213, _k73, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r214, _k74, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r215, _k75, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r216, _k76, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r217, _k77, _sum01);
__m256 _r220 = _mm256_broadcast_ss(r2 + 16);
__m256 _r221 = _mm256_broadcast_ss(r2 + 17);
__m256 _r222 = _mm256_broadcast_ss(r2 + 18);
__m256 _r223 = _mm256_broadcast_ss(r2 + 19);
__m256 _r224 = _mm256_broadcast_ss(r2 + 20);
__m256 _r225 = _mm256_broadcast_ss(r2 + 21);
__m256 _r226 = _mm256_broadcast_ss(r2 + 22);
__m256 _r227 = _mm256_broadcast_ss(r2 + 23);
_sum10 = _mm256_comp_fmadd_ps(_r220, _k70, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r221, _k71, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r222, _k72, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r223, _k73, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r224, _k74, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r225, _k75, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r226, _k76, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r227, _k77, _sum11);
__m256 _k80 = _mm256_loadu_ps(kptr);
__m256 _k81 = _mm256_loadu_ps(kptr + 8);
__m256 _k82 = _mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);
_sum00 = _mm256_comp_fmadd_ps(_r220, _k80, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r221, _k81, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r222, _k82, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r223, _k83, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r224, _k84, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r225, _k85, _sum01);
_sum00 = _mm256_comp_fmadd_ps(_r226, _k86, _sum00);
_sum01 = _mm256_comp_fmadd_ps(_r227, _k87, _sum01);
__m256 _r230 = _mm256_broadcast_ss(r2 + 24);
__m256 _r231 = _mm256_broadcast_ss(r2 + 25);
__m256 _r232 = _mm256_broadcast_ss(r2 + 26);
__m256 _r233 = _mm256_broadcast_ss(r2 + 27);
__m256 _r234 = _mm256_broadcast_ss(r2 + 28);
__m256 _r235 = _mm256_broadcast_ss(r2 + 29);
__m256 _r236 = _mm256_broadcast_ss(r2 + 30);
__m256 _r237 = _mm256_broadcast_ss(r2 + 31);
_sum10 = _mm256_comp_fmadd_ps(_r230, _k80, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r231, _k81, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r232, _k82, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r233, _k83, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r234, _k84, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r235, _k85, _sum11);
_sum10 = _mm256_comp_fmadd_ps(_r236, _k86, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_r237, _k87, _sum11);
kptr -= 64 * 8;
_sum00 = _mm256_add_ps(_sum00, _sum01);
_sum10 = _mm256_add_ps(_sum10, _sum11);
_mm256_storeu_ps(outptr, _sum00);
_mm256_storeu_ps(outptr + 8, _sum10);
r0 += 16;
r1 += 16;
r2 += 16;
outptr += 16;
}
for (; j < outw; j++)
{
__m256 _sum0 = _mm256_loadu_ps(outptr);
__m256 _sum1 = _mm256_setzero_ps();
__m256 _r000 = _mm256_broadcast_ss(r0 + 0);
__m256 _r001 = _mm256_broadcast_ss(r0 + 1);
__m256 _r002 = _mm256_broadcast_ss(r0 + 2);
__m256 _r003 = _mm256_broadcast_ss(r0 + 3);
__m256 _r004 = _mm256_broadcast_ss(r0 + 4);
__m256 _r005 = _mm256_broadcast_ss(r0 + 5);
__m256 _r006 = _mm256_broadcast_ss(r0 + 6);
__m256 _r007 = _mm256_broadcast_ss(r0 + 7);
__m256 _k00 = _mm256_loadu_ps(kptr);
__m256 _k01 = _mm256_loadu_ps(kptr + 8);
__m256 _k02 = _mm256_loadu_ps(kptr + 16);
__m256 _k03 = _mm256_loadu_ps(kptr + 24);
__m256 _k04 = _mm256_loadu_ps(kptr + 32);
__m256 _k05 = _mm256_loadu_ps(kptr + 40);
__m256 _k06 = _mm256_loadu_ps(kptr + 48);
__m256 _k07 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r000, _k00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r001, _k01, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r002, _k02, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r003, _k03, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r004, _k04, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r005, _k05, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r006, _k06, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r007, _k07, _sum1);
__m256 _r010 = _mm256_broadcast_ss(r0 + 8);
__m256 _r011 = _mm256_broadcast_ss(r0 + 9);
__m256 _r012 = _mm256_broadcast_ss(r0 + 10);
__m256 _r013 = _mm256_broadcast_ss(r0 + 11);
__m256 _r014 = _mm256_broadcast_ss(r0 + 12);
__m256 _r015 = _mm256_broadcast_ss(r0 + 13);
__m256 _r016 = _mm256_broadcast_ss(r0 + 14);
__m256 _r017 = _mm256_broadcast_ss(r0 + 15);
__m256 _k10 = _mm256_loadu_ps(kptr);
__m256 _k11 = _mm256_loadu_ps(kptr + 8);
__m256 _k12 = _mm256_loadu_ps(kptr + 16);
__m256 _k13 = _mm256_loadu_ps(kptr + 24);
__m256 _k14 = _mm256_loadu_ps(kptr + 32);
__m256 _k15 = _mm256_loadu_ps(kptr + 40);
__m256 _k16 = _mm256_loadu_ps(kptr + 48);
__m256 _k17 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r010, _k10, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r011, _k11, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r012, _k12, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r013, _k13, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r014, _k14, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r015, _k15, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r016, _k16, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r017, _k17, _sum1);
__m256 _r020 = _mm256_broadcast_ss(r0 + 16);
__m256 _r021 = _mm256_broadcast_ss(r0 + 17);
__m256 _r022 = _mm256_broadcast_ss(r0 + 18);
__m256 _r023 = _mm256_broadcast_ss(r0 + 19);
__m256 _r024 = _mm256_broadcast_ss(r0 + 20);
__m256 _r025 = _mm256_broadcast_ss(r0 + 21);
__m256 _r026 = _mm256_broadcast_ss(r0 + 22);
__m256 _r027 = _mm256_broadcast_ss(r0 + 23);
__m256 _k20 = _mm256_loadu_ps(kptr);
__m256 _k21 = _mm256_loadu_ps(kptr + 8);
__m256 _k22 = _mm256_loadu_ps(kptr + 16);
__m256 _k23 = _mm256_loadu_ps(kptr + 24);
__m256 _k24 = _mm256_loadu_ps(kptr + 32);
__m256 _k25 = _mm256_loadu_ps(kptr + 40);
__m256 _k26 = _mm256_loadu_ps(kptr + 48);
__m256 _k27 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r020, _k20, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r021, _k21, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r022, _k22, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r023, _k23, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r024, _k24, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r025, _k25, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r026, _k26, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r027, _k27, _sum1);
__m256 _r100 = _mm256_broadcast_ss(r1 + 0);
__m256 _r101 = _mm256_broadcast_ss(r1 + 1);
__m256 _r102 = _mm256_broadcast_ss(r1 + 2);
__m256 _r103 = _mm256_broadcast_ss(r1 + 3);
__m256 _r104 = _mm256_broadcast_ss(r1 + 4);
__m256 _r105 = _mm256_broadcast_ss(r1 + 5);
__m256 _r106 = _mm256_broadcast_ss(r1 + 6);
__m256 _r107 = _mm256_broadcast_ss(r1 + 7);
__m256 _k30 = _mm256_loadu_ps(kptr);
__m256 _k31 = _mm256_loadu_ps(kptr + 8);
__m256 _k32 = _mm256_loadu_ps(kptr + 16);
__m256 _k33 = _mm256_loadu_ps(kptr + 24);
__m256 _k34 = _mm256_loadu_ps(kptr + 32);
__m256 _k35 = _mm256_loadu_ps(kptr + 40);
__m256 _k36 = _mm256_loadu_ps(kptr + 48);
__m256 _k37 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r100, _k30, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r101, _k31, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r102, _k32, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r103, _k33, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r104, _k34, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r105, _k35, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r106, _k36, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r107, _k37, _sum1);
__m256 _r110 = _mm256_broadcast_ss(r1 + 8);
__m256 _r111 = _mm256_broadcast_ss(r1 + 9);
__m256 _r112 = _mm256_broadcast_ss(r1 + 10);
__m256 _r113 = _mm256_broadcast_ss(r1 + 11);
__m256 _r114 = _mm256_broadcast_ss(r1 + 12);
__m256 _r115 = _mm256_broadcast_ss(r1 + 13);
__m256 _r116 = _mm256_broadcast_ss(r1 + 14);
__m256 _r117 = _mm256_broadcast_ss(r1 + 15);
__m256 _k40 = _mm256_loadu_ps(kptr);
__m256 _k41 = _mm256_loadu_ps(kptr + 8);
__m256 _k42 = _mm256_loadu_ps(kptr + 16);
__m256 _k43 = _mm256_loadu_ps(kptr + 24);
__m256 _k44 = _mm256_loadu_ps(kptr + 32);
__m256 _k45 = _mm256_loadu_ps(kptr + 40);
__m256 _k46 = _mm256_loadu_ps(kptr + 48);
__m256 _k47 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r110, _k40, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r111, _k41, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r112, _k42, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r113, _k43, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r114, _k44, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r115, _k45, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r116, _k46, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r117, _k47, _sum1);
__m256 _r120 = _mm256_broadcast_ss(r1 + 16);
__m256 _r121 = _mm256_broadcast_ss(r1 + 17);
__m256 _r122 = _mm256_broadcast_ss(r1 + 18);
__m256 _r123 = _mm256_broadcast_ss(r1 + 19);
__m256 _r124 = _mm256_broadcast_ss(r1 + 20);
__m256 _r125 = _mm256_broadcast_ss(r1 + 21);
__m256 _r126 = _mm256_broadcast_ss(r1 + 22);
__m256 _r127 = _mm256_broadcast_ss(r1 + 23);
__m256 _k50 = _mm256_loadu_ps(kptr);
__m256 _k51 = _mm256_loadu_ps(kptr + 8);
__m256 _k52 = _mm256_loadu_ps(kptr + 16);
__m256 _k53 = _mm256_loadu_ps(kptr + 24);
__m256 _k54 = _mm256_loadu_ps(kptr + 32);
__m256 _k55 = _mm256_loadu_ps(kptr + 40);
__m256 _k56 = _mm256_loadu_ps(kptr + 48);
__m256 _k57 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r120, _k50, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r121, _k51, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r122, _k52, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r123, _k53, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r124, _k54, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r125, _k55, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r126, _k56, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r127, _k57, _sum1);
__m256 _r200 = _mm256_broadcast_ss(r2 + 0);
__m256 _r201 = _mm256_broadcast_ss(r2 + 1);
__m256 _r202 = _mm256_broadcast_ss(r2 + 2);
__m256 _r203 = _mm256_broadcast_ss(r2 + 3);
__m256 _r204 = _mm256_broadcast_ss(r2 + 4);
__m256 _r205 = _mm256_broadcast_ss(r2 + 5);
__m256 _r206 = _mm256_broadcast_ss(r2 + 6);
__m256 _r207 = _mm256_broadcast_ss(r2 + 7);
__m256 _k60 = _mm256_loadu_ps(kptr);
__m256 _k61 = _mm256_loadu_ps(kptr + 8);
__m256 _k62 = _mm256_loadu_ps(kptr + 16);
__m256 _k63 = _mm256_loadu_ps(kptr + 24);
__m256 _k64 = _mm256_loadu_ps(kptr + 32);
__m256 _k65 = _mm256_loadu_ps(kptr + 40);
__m256 _k66 = _mm256_loadu_ps(kptr + 48);
__m256 _k67 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r200, _k60, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r201, _k61, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r202, _k62, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r203, _k63, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r204, _k64, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r205, _k65, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r206, _k66, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r207, _k67, _sum1);
__m256 _r210 = _mm256_broadcast_ss(r2 + 8);
__m256 _r211 = _mm256_broadcast_ss(r2 + 9);
__m256 _r212 = _mm256_broadcast_ss(r2 + 10);
__m256 _r213 = _mm256_broadcast_ss(r2 + 11);
__m256 _r214 = _mm256_broadcast_ss(r2 + 12);
__m256 _r215 = _mm256_broadcast_ss(r2 + 13);
__m256 _r216 = _mm256_broadcast_ss(r2 + 14);
__m256 _r217 = _mm256_broadcast_ss(r2 + 15);
__m256 _k70 = _mm256_loadu_ps(kptr);
__m256 _k71 = _mm256_loadu_ps(kptr + 8);
__m256 _k72 = _mm256_loadu_ps(kptr + 16);
__m256 _k73 = _mm256_loadu_ps(kptr + 24);
__m256 _k74 = _mm256_loadu_ps(kptr + 32);
__m256 _k75 = _mm256_loadu_ps(kptr + 40);
__m256 _k76 = _mm256_loadu_ps(kptr + 48);
__m256 _k77 = _mm256_loadu_ps(kptr + 56);
kptr += 64;
_sum0 = _mm256_comp_fmadd_ps(_r210, _k70, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r211, _k71, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r212, _k72, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r213, _k73, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r214, _k74, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r215, _k75, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r216, _k76, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r217, _k77, _sum1);
__m256 _r220 = _mm256_broadcast_ss(r2 + 16);
__m256 _r221 = _mm256_broadcast_ss(r2 + 17);
__m256 _r222 = _mm256_broadcast_ss(r2 + 18);
__m256 _r223 = _mm256_broadcast_ss(r2 + 19);
__m256 _r224 = _mm256_broadcast_ss(r2 + 20);
__m256 _r225 = _mm256_broadcast_ss(r2 + 21);
__m256 _r226 = _mm256_broadcast_ss(r2 + 22);
__m256 _r227 = _mm256_broadcast_ss(r2 + 23);
__m256 _k80 = _mm256_loadu_ps(kptr);
__m256 _k81 = _mm256_loadu_ps(kptr + 8);
__m256 _k82 = _mm256_loadu_ps(kptr + 16);
__m256 _k83 = _mm256_loadu_ps(kptr + 24);
__m256 _k84 = _mm256_loadu_ps(kptr + 32);
__m256 _k85 = _mm256_loadu_ps(kptr + 40);
__m256 _k86 = _mm256_loadu_ps(kptr + 48);
__m256 _k87 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_comp_fmadd_ps(_r220, _k80, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r221, _k81, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r222, _k82, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r223, _k83, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r224, _k84, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r225, _k85, _sum1);
_sum0 = _mm256_comp_fmadd_ps(_r226, _k86, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_r227, _k87, _sum1);
kptr -= 64 * 8;
_sum0 = _mm256_add_ps(_sum0, _sum1);
_mm256_storeu_ps(outptr, _sum0);
r0 += 8;
r1 += 8;
r2 += 8;
outptr += 8;
}
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
static void conv3x3s1_winograd64_transform_kernel_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 8b-8a-inch/8a-64-outch/8b;
kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)4u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p + 7 < inch; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
g00[0] = k00[k];
g00[1] = k10[k];
g00[2] = k20[k];
g00[3] = k30[k];
g00[4] = k40[k];
g00[5] = k50[k];
g00[6] = k60[k];
g00[7] = k70[k];
g00[8] = k01[k];
g00[9] = k11[k];
g00[10] = k21[k];
g00[11] = k31[k];
g00[12] = k41[k];
g00[13] = k51[k];
g00[14] = k61[k];
g00[15] = k71[k];
g00[16] = k02[k];
g00[17] = k12[k];
g00[18] = k22[k];
g00[19] = k32[k];
g00[20] = k42[k];
g00[21] = k52[k];
g00[22] = k62[k];
g00[23] = k72[k];
g00[24] = k03[k];
g00[25] = k13[k];
g00[26] = k23[k];
g00[27] = k33[k];
g00[28] = k43[k];
g00[29] = k53[k];
g00[30] = k63[k];
g00[31] = k73[k];
g00[32] = k04[k];
g00[33] = k14[k];
g00[34] = k24[k];
g00[35] = k34[k];
g00[36] = k44[k];
g00[37] = k54[k];
g00[38] = k64[k];
g00[39] = k74[k];
g00[40] = k05[k];
g00[41] = k15[k];
g00[42] = k25[k];
g00[43] = k35[k];
g00[44] = k45[k];
g00[45] = k55[k];
g00[46] = k65[k];
g00[47] = k75[k];
g00[48] = k06[k];
g00[49] = k16[k];
g00[50] = k26[k];
g00[51] = k36[k];
g00[52] = k46[k];
g00[53] = k56[k];
g00[54] = k66[k];
g00[55] = k76[k];
g00[56] = k07[k];
g00[57] = k17[k];
g00[58] = k27[k];
g00[59] = k37[k];
g00[60] = k47[k];
g00[61] = k57[k];
g00[62] = k67[k];
g00[63] = k77[k];
g00 += 64;
}
}
}
}
static void conv3x3s1_winograd64_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][8];
// tile
for (int i = 0; i < h_tm / 8; i++)
{
for (int j = 0; j < w_tm / 8; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 8;
for (int m = 0; m < 8; m++)
{
__m256 _r00 = _mm256_loadu_ps(r0);
__m256 _r01 = _mm256_loadu_ps(r0 + 8);
__m256 _r02 = _mm256_loadu_ps(r0 + 16);
__m256 _r03 = _mm256_loadu_ps(r0 + 24);
__m256 _r04 = _mm256_loadu_ps(r0 + 32);
__m256 _r05 = _mm256_loadu_ps(r0 + 40);
__m256 _r06 = _mm256_loadu_ps(r0 + 48);
__m256 _r07 = _mm256_loadu_ps(r0 + 56);
__m256 _tmp0m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r00, _r06), _mm256_sub_ps(_r04, _r02), 5.25f);
__m256 _tmp7m = _mm256_fmadd_1_ps(_mm256_sub_ps(_r07, _r01), _mm256_sub_ps(_r03, _r05), 5.25f);
_mm256_storeu_ps(tmp[0][m], _tmp0m);
_mm256_storeu_ps(tmp[7][m], _tmp7m);
// tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
// tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
__m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_r02, _r06), _r04, 4.25f);
__m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_r01, _r05), _r03, 4.25f);
// float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
// float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
__m256 _tmp1m = _mm256_add_ps(_tmp12a, _tmp12b);
__m256 _tmp2m = _mm256_sub_ps(_tmp12a, _tmp12b);
_mm256_storeu_ps(tmp[1][m], _tmp1m);
_mm256_storeu_ps(tmp[2][m], _tmp2m);
// tmp[1][m] = tmp12a + tmp12b;
// tmp[2][m] = tmp12a - tmp12b;
__m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_r06, _r02, 0.25f), _r04, 1.25f);
__m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(0.5f)), _r03, 2.5f), _r05, 2.f);
// float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
// float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
__m256 _tmp3m = _mm256_add_ps(_tmp34a, _tmp34b);
__m256 _tmp4m = _mm256_sub_ps(_tmp34a, _tmp34b);
_mm256_storeu_ps(tmp[3][m], _tmp3m);
_mm256_storeu_ps(tmp[4][m], _tmp4m);
// tmp[3][m] = tmp34a + tmp34b;
// tmp[4][m] = tmp34a - tmp34b;
__m256 _tmp56a = _mm256_fmadd_1_ps(_r06, _mm256_fmrsub_1_ps(_r02, _r04, 1.25f), 4.f);
__m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_r01, _mm256_set1_ps(2.f)), _r03, 2.5f), _r05, 0.5f);
// float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
// float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
__m256 _tmp5m = _mm256_add_ps(_tmp56a, _tmp56b);
__m256 _tmp6m = _mm256_sub_ps(_tmp56a, _tmp56b);
_mm256_storeu_ps(tmp[5][m], _tmp5m);
_mm256_storeu_ps(tmp[6][m], _tmp6m);
// tmp[5][m] = tmp56a + tmp56b;
// tmp[6][m] = tmp56a - tmp56b;
r0 += w * 8;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 8;
float* r0_tm_1 = r0_tm_0 + tiles * 8;
float* r0_tm_2 = r0_tm_0 + tiles * 16;
float* r0_tm_3 = r0_tm_0 + tiles * 24;
float* r0_tm_4 = r0_tm_0 + tiles * 32;
float* r0_tm_5 = r0_tm_0 + tiles * 40;
float* r0_tm_6 = r0_tm_0 + tiles * 48;
float* r0_tm_7 = r0_tm_0 + tiles * 56;
for (int m = 0; m < 8; m++)
{
__m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]);
__m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]);
__m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]);
__m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]);
__m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]);
__m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]);
__m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]);
__m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]);
__m256 _r0tm0 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp00, _tmp06), _mm256_sub_ps(_tmp04, _tmp02), 5.25f);
__m256 _r0tm7 = _mm256_fmadd_1_ps(_mm256_sub_ps(_tmp07, _tmp01), _mm256_sub_ps(_tmp03, _tmp05), 5.25f);
// r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
// r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
__m256 _tmp12a = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp02, _tmp06), _tmp04, 4.25f);
__m256 _tmp12b = _mm256_fmrsub_1_ps(_mm256_add_ps(_tmp01, _tmp05), _tmp03, 4.25f);
// float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
// float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);
__m256 _r0tm1 = _mm256_add_ps(_tmp12a, _tmp12b);
__m256 _r0tm2 = _mm256_sub_ps(_tmp12a, _tmp12b);
// r0_tm[1] = tmp12a + tmp12b;
// r0_tm[2] = tmp12a - tmp12b;
__m256 _tmp34a = _mm256_fmrsub_1_ps(_mm256_fmadd_1_ps(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
__m256 _tmp34b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(0.5f)), _tmp03, 2.5f), _tmp05, 2.f);
// float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
// float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
__m256 _r0tm3 = _mm256_add_ps(_tmp34a, _tmp34b);
__m256 _r0tm4 = _mm256_sub_ps(_tmp34a, _tmp34b);
// r0_tm[3] = tmp34a + tmp34b;
// r0_tm[4] = tmp34a - tmp34b;
__m256 _tmp56a = _mm256_fmadd_1_ps(_tmp06, _mm256_fmrsub_1_ps(_tmp02, _tmp04, 1.25f), 4.f);
__m256 _tmp56b = _mm256_fmadd_1_ps(_mm256_fmrsub_1_ps(_mm256_mul_ps(_tmp01, _mm256_set1_ps(2.f)), _tmp03, 2.5f), _tmp05, 0.5f);
// float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
// float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
__m256 _r0tm5 = _mm256_add_ps(_tmp56a, _tmp56b);
__m256 _r0tm6 = _mm256_sub_ps(_tmp56a, _tmp56b);
// r0_tm[5] = tmp56a + tmp56b;
// r0_tm[6] = tmp56a - tmp56b;
_mm256_storeu_ps(r0_tm_0, _r0tm0);
_mm256_storeu_ps(r0_tm_1, _r0tm1);
_mm256_storeu_ps(r0_tm_2, _r0tm2);
_mm256_storeu_ps(r0_tm_3, _r0tm3);
_mm256_storeu_ps(r0_tm_4, _r0tm4);
_mm256_storeu_ps(r0_tm_5, _r0tm5);
_mm256_storeu_ps(r0_tm_6, _r0tm6);
_mm256_storeu_ps(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 64;
r0_tm_1 += tiles * 64;
r0_tm_2 += tiles * 64;
r0_tm_3 += tiles * 64;
r0_tm_4 += tiles * 64;
r0_tm_5 += tiles * 64;
r0_tm_6 += tiles * 64;
r0_tm_7 += tiles * 64;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r1 = _mm256_loadu_ps(r0 + 8);
__m256 _r2 = _mm256_loadu_ps(r0 + 16);
__m256 _r3 = _mm256_loadu_ps(r0 + 24);
__m256 _r4 = _mm256_loadu_ps(r0 + 32);
__m256 _r5 = _mm256_loadu_ps(r0 + 40);
__m256 _r6 = _mm256_loadu_ps(r0 + 48);
__m256 _r7 = _mm256_loadu_ps(r0 + 56);
__m256 _r8 = _mm256_loadu_ps(r0 + 64);
__m256 _r9 = _mm256_loadu_ps(r0 + 72);
__m256 _r10 = _mm256_loadu_ps(r0 + 80);
__m256 _r11 = _mm256_loadu_ps(r0 + 88);
_mm256_storeu_ps(tm2p, _r0);
_mm256_storeu_ps(tm2p + 8, _r1);
_mm256_storeu_ps(tm2p + 16, _r2);
_mm256_storeu_ps(tm2p + 24, _r3);
_mm256_storeu_ps(tm2p + 32, _r4);
_mm256_storeu_ps(tm2p + 40, _r5);
_mm256_storeu_ps(tm2p + 48, _r6);
_mm256_storeu_ps(tm2p + 56, _r7);
_mm256_storeu_ps(tm2p + 64, _r8);
_mm256_storeu_ps(tm2p + 72, _r9);
_mm256_storeu_ps(tm2p + 80, _r10);
_mm256_storeu_ps(tm2p + 88, _r11);
tm2p += 96;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r1 = _mm256_loadu_ps(r0 + 8);
_mm256_storeu_ps(tm2p, _r0);
_mm256_storeu_ps(tm2p + 8, _r1);
__m256 _r2 = _mm256_loadu_ps(r0 + 16);
__m256 _r3 = _mm256_loadu_ps(r0 + 24);
_mm256_storeu_ps(tm2p + 16, _r2);
_mm256_storeu_ps(tm2p + 24, _r3);
__m256 _r4 = _mm256_loadu_ps(r0 + 32);
__m256 _r5 = _mm256_loadu_ps(r0 + 40);
_mm256_storeu_ps(tm2p + 32, _r4);
_mm256_storeu_ps(tm2p + 40, _r5);
__m256 _r6 = _mm256_loadu_ps(r0 + 48);
__m256 _r7 = _mm256_loadu_ps(r0 + 56);
_mm256_storeu_ps(tm2p + 48, _r6);
_mm256_storeu_ps(tm2p + 56, _r7);
tm2p += 64;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r1 = _mm256_loadu_ps(r0 + 8);
_mm256_storeu_ps(tm2p, _r0);
_mm256_storeu_ps(tm2p + 8, _r1);
__m256 _r2 = _mm256_loadu_ps(r0 + 16);
__m256 _r3 = _mm256_loadu_ps(r0 + 24);
_mm256_storeu_ps(tm2p + 16, _r2);
_mm256_storeu_ps(tm2p + 24, _r3);
tm2p += 32;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
__m256 _r1 = _mm256_loadu_ps(r0 + 8);
_mm256_storeu_ps(tm2p, _r0);
_mm256_storeu_ps(tm2p + 8, _r1);
tm2p += 16;
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
_mm256_storeu_ps(tm2p, _r0);
tm2p += 8;
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k01 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_set1_ps(0.f);
__m256 _sum1 = _mm256_set1_ps(0.f);
__m256 _sum2 = _mm256_set1_ps(0.f);
__m256 _sum3 = _mm256_set1_ps(0.f);
__m256 _sum4 = _mm256_set1_ps(0.f);
__m256 _sum5 = _mm256_set1_ps(0.f);
__m256 _sum6 = _mm256_set1_ps(0.f);
__m256 _sum7 = _mm256_set1_ps(0.f);
__m256 _sum8 = _mm256_set1_ps(0.f);
__m256 _sum9 = _mm256_set1_ps(0.f);
__m256 _sum10 = _mm256_set1_ps(0.f);
__m256 _sum11 = _mm256_set1_ps(0.f);
for (; nn > 0; nn--)
{
__m256 _k01 = _mm256_loadu_ps(k01);
__m256 _r00 = _mm256_broadcast_ss(r0 + 0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 8);
__m256 _r02 = _mm256_broadcast_ss(r0 + 16);
__m256 _r03 = _mm256_broadcast_ss(r0 + 24);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
__m256 _r04 = _mm256_broadcast_ss(r0 + 32);
__m256 _r05 = _mm256_broadcast_ss(r0 + 40);
__m256 _r06 = _mm256_broadcast_ss(r0 + 48);
__m256 _r07 = _mm256_broadcast_ss(r0 + 56);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
__m256 _r08 = _mm256_broadcast_ss(r0 + 64);
__m256 _r09 = _mm256_broadcast_ss(r0 + 72);
__m256 _r010 = _mm256_broadcast_ss(r0 + 80);
__m256 _r011 = _mm256_broadcast_ss(r0 + 88);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 8);
_r00 = _mm256_broadcast_ss(r0 + 1);
_r01 = _mm256_broadcast_ss(r0 + 9);
_r02 = _mm256_broadcast_ss(r0 + 17);
_r03 = _mm256_broadcast_ss(r0 + 25);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 33);
_r05 = _mm256_broadcast_ss(r0 + 41);
_r06 = _mm256_broadcast_ss(r0 + 49);
_r07 = _mm256_broadcast_ss(r0 + 57);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 65);
_r09 = _mm256_broadcast_ss(r0 + 73);
_r010 = _mm256_broadcast_ss(r0 + 81);
_r011 = _mm256_broadcast_ss(r0 + 89);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 16);
_r00 = _mm256_broadcast_ss(r0 + 2);
_r01 = _mm256_broadcast_ss(r0 + 10);
_r02 = _mm256_broadcast_ss(r0 + 18);
_r03 = _mm256_broadcast_ss(r0 + 26);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 34);
_r05 = _mm256_broadcast_ss(r0 + 42);
_r06 = _mm256_broadcast_ss(r0 + 50);
_r07 = _mm256_broadcast_ss(r0 + 58);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 66);
_r09 = _mm256_broadcast_ss(r0 + 74);
_r010 = _mm256_broadcast_ss(r0 + 82);
_r011 = _mm256_broadcast_ss(r0 + 90);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 24);
_r00 = _mm256_broadcast_ss(r0 + 3);
_r01 = _mm256_broadcast_ss(r0 + 11);
_r02 = _mm256_broadcast_ss(r0 + 19);
_r03 = _mm256_broadcast_ss(r0 + 27);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 35);
_r05 = _mm256_broadcast_ss(r0 + 43);
_r06 = _mm256_broadcast_ss(r0 + 51);
_r07 = _mm256_broadcast_ss(r0 + 59);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 67);
_r09 = _mm256_broadcast_ss(r0 + 75);
_r010 = _mm256_broadcast_ss(r0 + 83);
_r011 = _mm256_broadcast_ss(r0 + 91);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 32);
_r00 = _mm256_broadcast_ss(r0 + 4);
_r01 = _mm256_broadcast_ss(r0 + 12);
_r02 = _mm256_broadcast_ss(r0 + 20);
_r03 = _mm256_broadcast_ss(r0 + 28);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 36);
_r05 = _mm256_broadcast_ss(r0 + 44);
_r06 = _mm256_broadcast_ss(r0 + 52);
_r07 = _mm256_broadcast_ss(r0 + 60);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 68);
_r09 = _mm256_broadcast_ss(r0 + 76);
_r010 = _mm256_broadcast_ss(r0 + 84);
_r011 = _mm256_broadcast_ss(r0 + 92);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 40);
_r00 = _mm256_broadcast_ss(r0 + 5);
_r01 = _mm256_broadcast_ss(r0 + 13);
_r02 = _mm256_broadcast_ss(r0 + 21);
_r03 = _mm256_broadcast_ss(r0 + 29);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 37);
_r05 = _mm256_broadcast_ss(r0 + 45);
_r06 = _mm256_broadcast_ss(r0 + 53);
_r07 = _mm256_broadcast_ss(r0 + 61);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 69);
_r09 = _mm256_broadcast_ss(r0 + 77);
_r010 = _mm256_broadcast_ss(r0 + 85);
_r011 = _mm256_broadcast_ss(r0 + 93);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 48);
_r00 = _mm256_broadcast_ss(r0 + 6);
_r01 = _mm256_broadcast_ss(r0 + 14);
_r02 = _mm256_broadcast_ss(r0 + 22);
_r03 = _mm256_broadcast_ss(r0 + 30);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 38);
_r05 = _mm256_broadcast_ss(r0 + 46);
_r06 = _mm256_broadcast_ss(r0 + 54);
_r07 = _mm256_broadcast_ss(r0 + 62);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 70);
_r09 = _mm256_broadcast_ss(r0 + 78);
_r010 = _mm256_broadcast_ss(r0 + 86);
_r011 = _mm256_broadcast_ss(r0 + 94);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
_k01 = _mm256_loadu_ps(k01 + 56);
_r00 = _mm256_broadcast_ss(r0 + 7);
_r01 = _mm256_broadcast_ss(r0 + 15);
_r02 = _mm256_broadcast_ss(r0 + 23);
_r03 = _mm256_broadcast_ss(r0 + 31);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 39);
_r05 = _mm256_broadcast_ss(r0 + 47);
_r06 = _mm256_broadcast_ss(r0 + 55);
_r07 = _mm256_broadcast_ss(r0 + 63);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_r08 = _mm256_broadcast_ss(r0 + 71);
_r09 = _mm256_broadcast_ss(r0 + 79);
_r010 = _mm256_broadcast_ss(r0 + 87);
_r011 = _mm256_broadcast_ss(r0 + 95);
_sum8 = _mm256_comp_fmadd_ps(_k01, _r08, _sum8);
_sum9 = _mm256_comp_fmadd_ps(_k01, _r09, _sum9);
_sum10 = _mm256_comp_fmadd_ps(_k01, _r010, _sum10);
_sum11 = _mm256_comp_fmadd_ps(_k01, _r011, _sum11);
k01 += 64;
r0 += 96;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum1);
_mm256_storeu_ps(output0_tm + 16, _sum2);
_mm256_storeu_ps(output0_tm + 24, _sum3);
_mm256_storeu_ps(output0_tm + 32, _sum4);
_mm256_storeu_ps(output0_tm + 40, _sum5);
_mm256_storeu_ps(output0_tm + 48, _sum6);
_mm256_storeu_ps(output0_tm + 56, _sum7);
_mm256_storeu_ps(output0_tm + 64, _sum8);
_mm256_storeu_ps(output0_tm + 72, _sum9);
_mm256_storeu_ps(output0_tm + 80, _sum10);
_mm256_storeu_ps(output0_tm + 88, _sum11);
output0_tm += 96;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k01 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_set1_ps(0.f);
__m256 _sum1 = _mm256_set1_ps(0.f);
__m256 _sum2 = _mm256_set1_ps(0.f);
__m256 _sum3 = _mm256_set1_ps(0.f);
__m256 _sum4 = _mm256_set1_ps(0.f);
__m256 _sum5 = _mm256_set1_ps(0.f);
__m256 _sum6 = _mm256_set1_ps(0.f);
__m256 _sum7 = _mm256_set1_ps(0.f);
for (; nn > 0; nn--)
{
__m256 _k01 = _mm256_loadu_ps(k01);
__m256 _r00 = _mm256_broadcast_ss(r0 + 0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 8);
__m256 _r02 = _mm256_broadcast_ss(r0 + 16);
__m256 _r03 = _mm256_broadcast_ss(r0 + 24);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
__m256 _r04 = _mm256_broadcast_ss(r0 + 32);
__m256 _r05 = _mm256_broadcast_ss(r0 + 40);
__m256 _r06 = _mm256_broadcast_ss(r0 + 48);
__m256 _r07 = _mm256_broadcast_ss(r0 + 56);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 8);
_r00 = _mm256_broadcast_ss(r0 + 1);
_r01 = _mm256_broadcast_ss(r0 + 9);
_r02 = _mm256_broadcast_ss(r0 + 17);
_r03 = _mm256_broadcast_ss(r0 + 25);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 33);
_r05 = _mm256_broadcast_ss(r0 + 41);
_r06 = _mm256_broadcast_ss(r0 + 49);
_r07 = _mm256_broadcast_ss(r0 + 57);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 16);
_r00 = _mm256_broadcast_ss(r0 + 2);
_r01 = _mm256_broadcast_ss(r0 + 10);
_r02 = _mm256_broadcast_ss(r0 + 18);
_r03 = _mm256_broadcast_ss(r0 + 26);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 34);
_r05 = _mm256_broadcast_ss(r0 + 42);
_r06 = _mm256_broadcast_ss(r0 + 50);
_r07 = _mm256_broadcast_ss(r0 + 58);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 24);
_r00 = _mm256_broadcast_ss(r0 + 3);
_r01 = _mm256_broadcast_ss(r0 + 11);
_r02 = _mm256_broadcast_ss(r0 + 19);
_r03 = _mm256_broadcast_ss(r0 + 27);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 35);
_r05 = _mm256_broadcast_ss(r0 + 43);
_r06 = _mm256_broadcast_ss(r0 + 51);
_r07 = _mm256_broadcast_ss(r0 + 59);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 32);
_r00 = _mm256_broadcast_ss(r0 + 4);
_r01 = _mm256_broadcast_ss(r0 + 12);
_r02 = _mm256_broadcast_ss(r0 + 20);
_r03 = _mm256_broadcast_ss(r0 + 28);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 36);
_r05 = _mm256_broadcast_ss(r0 + 44);
_r06 = _mm256_broadcast_ss(r0 + 52);
_r07 = _mm256_broadcast_ss(r0 + 60);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 40);
_r00 = _mm256_broadcast_ss(r0 + 5);
_r01 = _mm256_broadcast_ss(r0 + 13);
_r02 = _mm256_broadcast_ss(r0 + 21);
_r03 = _mm256_broadcast_ss(r0 + 29);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 37);
_r05 = _mm256_broadcast_ss(r0 + 45);
_r06 = _mm256_broadcast_ss(r0 + 53);
_r07 = _mm256_broadcast_ss(r0 + 61);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 48);
_r00 = _mm256_broadcast_ss(r0 + 6);
_r01 = _mm256_broadcast_ss(r0 + 14);
_r02 = _mm256_broadcast_ss(r0 + 22);
_r03 = _mm256_broadcast_ss(r0 + 30);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 38);
_r05 = _mm256_broadcast_ss(r0 + 46);
_r06 = _mm256_broadcast_ss(r0 + 54);
_r07 = _mm256_broadcast_ss(r0 + 62);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
_k01 = _mm256_loadu_ps(k01 + 56);
_r00 = _mm256_broadcast_ss(r0 + 7);
_r01 = _mm256_broadcast_ss(r0 + 15);
_r02 = _mm256_broadcast_ss(r0 + 23);
_r03 = _mm256_broadcast_ss(r0 + 31);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_r04 = _mm256_broadcast_ss(r0 + 39);
_r05 = _mm256_broadcast_ss(r0 + 47);
_r06 = _mm256_broadcast_ss(r0 + 55);
_r07 = _mm256_broadcast_ss(r0 + 63);
_sum4 = _mm256_comp_fmadd_ps(_k01, _r04, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_k01, _r05, _sum5);
_sum6 = _mm256_comp_fmadd_ps(_k01, _r06, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_k01, _r07, _sum7);
k01 += 64;
r0 += 64;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum1);
_mm256_storeu_ps(output0_tm + 16, _sum2);
_mm256_storeu_ps(output0_tm + 24, _sum3);
_mm256_storeu_ps(output0_tm + 32, _sum4);
_mm256_storeu_ps(output0_tm + 40, _sum5);
_mm256_storeu_ps(output0_tm + 48, _sum6);
_mm256_storeu_ps(output0_tm + 56, _sum7);
output0_tm += 64;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k01 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_set1_ps(0.f);
__m256 _sum1 = _mm256_set1_ps(0.f);
__m256 _sum2 = _mm256_set1_ps(0.f);
__m256 _sum3 = _mm256_set1_ps(0.f);
for (; nn > 0; nn--)
{
__m256 _k01 = _mm256_loadu_ps(k01);
__m256 _r00 = _mm256_broadcast_ss(r0 + 0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 8);
__m256 _r02 = _mm256_broadcast_ss(r0 + 16);
__m256 _r03 = _mm256_broadcast_ss(r0 + 24);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 8);
_r00 = _mm256_broadcast_ss(r0 + 1);
_r01 = _mm256_broadcast_ss(r0 + 9);
_r02 = _mm256_broadcast_ss(r0 + 17);
_r03 = _mm256_broadcast_ss(r0 + 25);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 16);
_r00 = _mm256_broadcast_ss(r0 + 2);
_r01 = _mm256_broadcast_ss(r0 + 10);
_r02 = _mm256_broadcast_ss(r0 + 18);
_r03 = _mm256_broadcast_ss(r0 + 26);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 24);
_r00 = _mm256_broadcast_ss(r0 + 3);
_r01 = _mm256_broadcast_ss(r0 + 11);
_r02 = _mm256_broadcast_ss(r0 + 19);
_r03 = _mm256_broadcast_ss(r0 + 27);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 32);
_r00 = _mm256_broadcast_ss(r0 + 4);
_r01 = _mm256_broadcast_ss(r0 + 12);
_r02 = _mm256_broadcast_ss(r0 + 20);
_r03 = _mm256_broadcast_ss(r0 + 28);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 40);
_r00 = _mm256_broadcast_ss(r0 + 5);
_r01 = _mm256_broadcast_ss(r0 + 13);
_r02 = _mm256_broadcast_ss(r0 + 21);
_r03 = _mm256_broadcast_ss(r0 + 29);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 48);
_r00 = _mm256_broadcast_ss(r0 + 6);
_r01 = _mm256_broadcast_ss(r0 + 14);
_r02 = _mm256_broadcast_ss(r0 + 22);
_r03 = _mm256_broadcast_ss(r0 + 30);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
_k01 = _mm256_loadu_ps(k01 + 56);
_r00 = _mm256_broadcast_ss(r0 + 7);
_r01 = _mm256_broadcast_ss(r0 + 15);
_r02 = _mm256_broadcast_ss(r0 + 23);
_r03 = _mm256_broadcast_ss(r0 + 31);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r00, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_k01, _r02, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_k01, _r03, _sum3);
k01 += 64;
r0 += 32;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum1);
_mm256_storeu_ps(output0_tm + 16, _sum2);
_mm256_storeu_ps(output0_tm + 24, _sum3);
output0_tm += 32;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k01 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_set1_ps(0.f);
__m256 _sum1 = _mm256_set1_ps(0.f);
for (; nn > 0; nn--)
{
__m256 _k01 = _mm256_loadu_ps(k01);
__m256 _r0 = _mm256_broadcast_ss(r0);
__m256 _r01 = _mm256_broadcast_ss(r0 + 8);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 8);
_r0 = _mm256_broadcast_ss(r0 + 1);
_r01 = _mm256_broadcast_ss(r0 + 9);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 16);
_r0 = _mm256_broadcast_ss(r0 + 2);
_r01 = _mm256_broadcast_ss(r0 + 10);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 24);
_r0 = _mm256_broadcast_ss(r0 + 3);
_r01 = _mm256_broadcast_ss(r0 + 11);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 32);
_r0 = _mm256_broadcast_ss(r0 + 4);
_r01 = _mm256_broadcast_ss(r0 + 12);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 40);
_r0 = _mm256_broadcast_ss(r0 + 5);
_r01 = _mm256_broadcast_ss(r0 + 13);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 48);
_r0 = _mm256_broadcast_ss(r0 + 6);
_r01 = _mm256_broadcast_ss(r0 + 14);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
_k01 = _mm256_loadu_ps(k01 + 56);
_r0 = _mm256_broadcast_ss(r0 + 7);
_r01 = _mm256_broadcast_ss(r0 + 15);
_sum0 = _mm256_comp_fmadd_ps(_k01, _r0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1);
k01 += 64;
r0 += 16;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output0_tm + 8, _sum1);
output0_tm += 16;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k01 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_set1_ps(0.f);
for (; nn > 0; nn--)
{
__m256 _k01 = _mm256_loadu_ps(k01);
__m256 _r0 = _mm256_broadcast_ss(r0);
__m256 _mul0 = _mm256_mul_ps(_k01, _r0);
_k01 = _mm256_loadu_ps(k01 + 8);
_r0 = _mm256_broadcast_ss(r0 + 1);
__m256 _mul1 = _mm256_mul_ps(_k01, _r0);
_k01 = _mm256_loadu_ps(k01 + 16);
_r0 = _mm256_broadcast_ss(r0 + 2);
__m256 _mul2 = _mm256_mul_ps(_k01, _r0);
__m256 _add01 = _mm256_add_ps(_mul0, _mul1);
_k01 = _mm256_loadu_ps(k01 + 24);
_r0 = _mm256_broadcast_ss(r0 + 3);
__m256 _mul3 = _mm256_mul_ps(_k01, _r0);
__m256 _add23 = _mm256_add_ps(_mul2, _mul3);
__m256 _add0123 = _mm256_add_ps(_add01, _add23);
_sum0 = _mm256_add_ps(_sum0, _add0123);
_k01 = _mm256_loadu_ps(k01 + 32);
_r0 = _mm256_broadcast_ss(r0 + 4);
__m256 _mul4 = _mm256_mul_ps(_k01, _r0);
_k01 = _mm256_loadu_ps(k01 + 40);
_r0 = _mm256_broadcast_ss(r0 + 5);
__m256 _mul5 = _mm256_mul_ps(_k01, _r0);
_k01 = _mm256_loadu_ps(k01 + 48);
_r0 = _mm256_broadcast_ss(r0 + 6);
__m256 _mul6 = _mm256_mul_ps(_k01, _r0);
__m256 _add45 = _mm256_add_ps(_mul4, _mul5);
_k01 = _mm256_loadu_ps(k01 + 56);
_r0 = _mm256_broadcast_ss(r0 + 7);
__m256 _mul7 = _mm256_mul_ps(_k01, _r0);
__m256 _add67 = _mm256_add_ps(_mul6, _mul7);
__m256 _add4567 = _mm256_add_ps(_add45, _add67);
_sum0 = _mm256_add_ps(_sum0, _add4567);
k01 += 64;
r0 += 8;
}
_mm256_storeu_ps(output0_tm, _sum0);
output0_tm += 8;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm / 8 * h_tm / 8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
// const float bias0 = bias ? bias[p] : 0.f;
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float tmp[6][8][8];
// tile
for (int i = 0; i < outh / 6; i++)
{
for (int j = 0; j < outw / 6; j++)
{
// top_blob_tm.create(tiles, 64, outch, elemsize, elempack);
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 8;
const float* output0_tm_1 = output0_tm_0 + tiles * 8;
const float* output0_tm_2 = output0_tm_0 + tiles * 16;
const float* output0_tm_3 = output0_tm_0 + tiles * 24;
const float* output0_tm_4 = output0_tm_0 + tiles * 32;
const float* output0_tm_5 = output0_tm_0 + tiles * 40;
const float* output0_tm_6 = output0_tm_0 + tiles * 48;
const float* output0_tm_7 = output0_tm_0 + tiles * 56;
float* output0 = out0.row(i * 6) + (j * 6) * 8;
// TODO neon optimize
for (int m = 0; m < 8; m++)
{
__m256 _out0tm0 = _mm256_loadu_ps(output0_tm_0);
__m256 _out0tm1 = _mm256_loadu_ps(output0_tm_1);
__m256 _out0tm2 = _mm256_loadu_ps(output0_tm_2);
__m256 _out0tm3 = _mm256_loadu_ps(output0_tm_3);
__m256 _out0tm4 = _mm256_loadu_ps(output0_tm_4);
__m256 _out0tm5 = _mm256_loadu_ps(output0_tm_5);
__m256 _out0tm6 = _mm256_loadu_ps(output0_tm_6);
__m256 _out0tm7 = _mm256_loadu_ps(output0_tm_7);
__m256 _tmp024a = _mm256_add_ps(_out0tm1, _out0tm2);
__m256 _tmp135a = _mm256_sub_ps(_out0tm1, _out0tm2);
// float tmp024a = output0_tm[1] + output0_tm[2];
// float tmp135a = output0_tm[1] - output0_tm[2];
__m256 _tmp024b = _mm256_add_ps(_out0tm3, _out0tm4);
__m256 _tmp135b = _mm256_sub_ps(_out0tm3, _out0tm4);
// float tmp024b = output0_tm[3] + output0_tm[4];
// float tmp135b = output0_tm[3] - output0_tm[4];
__m256 _tmp024c = _mm256_add_ps(_out0tm5, _out0tm6);
__m256 _tmp135c = _mm256_sub_ps(_out0tm5, _out0tm6);
// float tmp024c = output0_tm[5] + output0_tm[6];
// float tmp135c = output0_tm[5] - output0_tm[6];
__m256 _tmp0m = _mm256_add_ps(_mm256_add_ps(_out0tm0, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f));
__m256 _tmp2m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
__m256 _tmp4m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
_mm256_storeu_ps(tmp[0][m], _tmp0m);
_mm256_storeu_ps(tmp[2][m], _tmp2m);
_mm256_storeu_ps(tmp[4][m], _tmp4m);
// tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
// tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
// tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c * 2;
__m256 _tmp1m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
__m256 _tmp3m = _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
__m256 _tmp5m = _mm256_add_ps(_mm256_add_ps(_out0tm7, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f));
_mm256_storeu_ps(tmp[1][m], _tmp1m);
_mm256_storeu_ps(tmp[3][m], _tmp3m);
_mm256_storeu_ps(tmp[5][m], _tmp5m);
// tmp[1][m] = tmp135a + tmp135b * 2 + tmp135c * 16;
// tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
// tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += tiles * 64;
output0_tm_1 += tiles * 64;
output0_tm_2 += tiles * 64;
output0_tm_3 += tiles * 64;
output0_tm_4 += tiles * 64;
output0_tm_5 += tiles * 64;
output0_tm_6 += tiles * 64;
output0_tm_7 += tiles * 64;
}
for (int m = 0; m < 6; m++)
{
__m256 _tmp00 = _mm256_loadu_ps(tmp[m][0]);
__m256 _tmp01 = _mm256_loadu_ps(tmp[m][1]);
__m256 _tmp02 = _mm256_loadu_ps(tmp[m][2]);
__m256 _tmp03 = _mm256_loadu_ps(tmp[m][3]);
__m256 _tmp04 = _mm256_loadu_ps(tmp[m][4]);
__m256 _tmp05 = _mm256_loadu_ps(tmp[m][5]);
__m256 _tmp06 = _mm256_loadu_ps(tmp[m][6]);
__m256 _tmp07 = _mm256_loadu_ps(tmp[m][7]);
__m256 _tmp024a = _mm256_add_ps(_tmp01, _tmp02);
__m256 _tmp135a = _mm256_sub_ps(_tmp01, _tmp02);
// float tmp024a = tmp0[1] + tmp0[2];
// float tmp135a = tmp0[1] - tmp0[2];
__m256 _tmp024b = _mm256_add_ps(_tmp03, _tmp04);
__m256 _tmp135b = _mm256_sub_ps(_tmp03, _tmp04);
// float tmp024b = tmp0[3] + tmp0[4];
// float tmp135b = tmp0[3] - tmp0[4];
__m256 _tmp024c = _mm256_add_ps(_tmp05, _tmp06);
__m256 _tmp135c = _mm256_sub_ps(_tmp05, _tmp06);
// float tmp024c = tmp0[5] + tmp0[6];
// float tmp135c = tmp0[5] - tmp0[6];
__m256 _out00 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp00, _tmp024a), _mm256_fmadd_1_ps(_tmp024b, _tmp024c, 32.f)));
__m256 _out02 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
__m256 _out04 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
_mm256_storeu_ps(output0, _out00);
_mm256_storeu_ps(output0 + 16, _out02);
_mm256_storeu_ps(output0 + 32, _out04);
// output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
// output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
// output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c * 2;
__m256 _out01 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
__m256 _out03 = _mm256_add_ps(_bias0, _mm256_fmadd_1_ps(_mm256_fmadd_1_ps(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
__m256 _out05 = _mm256_add_ps(_bias0, _mm256_add_ps(_mm256_add_ps(_tmp07, _tmp135a), _mm256_fmadd_1_ps(_tmp135c, _tmp135b, 32.f)));
_mm256_storeu_ps(output0 + 8, _out01);
_mm256_storeu_ps(output0 + 24, _out03);
_mm256_storeu_ps(output0 + 40, _out05);
// output0[1] = bias0 + tmp135a + tmp135b * 2 + tmp135c * 16;
// output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
// output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw * 8;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
core_stradd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztradd.c, normal z -> s, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_internal.h"
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_tradd
*
* Performs an addition of two trapezoidal matrices similarly to the
* pstradd() function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
 * \f[ op( X ) = X, \f]
 * \f[ op( X ) = X^T, \f]
 * \f[ op( X ) = X^T, \f] (conjugate transpose; identical to the plain
 * transpose in real arithmetic, hence the repeated formula)
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] uplo
* Specifies the shape of A and B matrices:
* - PlasmaUpper: op( A ) and B are upper trapezoidal matrices.
* - PlasmaLower: op( A ) and B are lower trapezoidal matrices.
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^T
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
* @param[in] n
* Number of columns of the matrices op( A ) and B.
* n >= 0.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] A
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa = PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] B
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
int plasma_core_stradd(plasma_enum_t uplo, plasma_enum_t transa,
                       int m, int n,
                       float alpha, const float *A, int lda,
                       float beta, float *B, int ldb)
{
    //------------------------------------------------------------------
    // Validate arguments; negative return codes mirror parameter slots.
    //------------------------------------------------------------------
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_coreblas_error("illegal value of uplo");
        return -1;
    }
    if (transa != PlasmaNoTrans &&
        transa != PlasmaTrans &&
        transa != PlasmaConjTrans) {
        plasma_coreblas_error("illegal value of transa");
        return -2;
    }
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -3;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -4;
    }
    if (A == NULL) {
        plasma_coreblas_error("NULL A");
        return -6;
    }
    if ((transa == PlasmaNoTrans && lda < imax(1, m) && m > 0) ||
        (transa != PlasmaNoTrans && lda < imax(1, n) && n > 0)) {
        plasma_coreblas_error("illegal value of lda");
        return -7;
    }
    if (B == NULL) {
        plasma_coreblas_error("NULL B");
        return -9;
    }
    if (ldb < imax(1, m) && (m > 0)) {
        plasma_coreblas_error("illegal value of ldb");
        return -10;
    }

    // Nothing to do for empty shapes or the identity update.
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;

    // In real arithmetic the conjugate transpose degenerates to a plain
    // transpose, so both transposed variants read A at [lda*row+col].
    int transposed = (transa == PlasmaTrans || transa == PlasmaConjTrans);

    if (uplo == PlasmaLower) {
        // Lower trapezoid: column col touches rows col..m-1.
        for (int col = 0; col < n; col++) {
            for (int row = col; row < m; row++) {
                float a = transposed ? A[lda*row+col] : A[lda*col+row];
                B[ldb*col+row] = beta * B[ldb*col+row] + alpha * a;
            }
        }
    }
    else {
        // Upper trapezoid: column col touches rows 0..min(col, m-1).
        for (int col = 0; col < n; col++) {
            for (int row = 0; row < imin(col+1, m); row++) {
                float a = transposed ? A[lda*row+col] : A[lda*col+row];
                B[ldb*col+row] = beta * B[ldb*col+row] + alpha * a;
            }
        }
    }
    return PlasmaSuccess;
}
/******************************************************************************/
// OpenMP task wrapper around plasma_core_stradd(): schedules the update as an
// asynchronous task with dataflow dependences on A (in) and B (inout).
// Errors are reported through the sequence/request pair, not a return value.
void plasma_core_omp_stradd(
    plasma_enum_t uplo, plasma_enum_t transa,
    int m, int n,
    float alpha, const float *A, int lda,
    float beta, float *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // op(A) is m-by-n for NoTrans and n-by-m otherwise, so the footprint of
    // the stored A (lda*k elements) depends on transa.
    int k = (transa == PlasmaNoTrans) ? n : m;
    #pragma omp task depend(in:A[0:lda*k]) \
                     depend(inout:B[0:ldb*n])
    {
        // Skip the work if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            int retval = plasma_core_stradd(uplo, transa,
                                            m, n,
                                            alpha, A, lda,
                                            beta, B, ldb);
            if (retval != PlasmaSuccess) {
                plasma_error("core_stradd() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
aln_controller.c | #include <stdint.h>
#include "tldevel.h"
#include "alignment_parameters.h"
#include "aln_struct.h"
/* The dynamic programming modules */
#include "aln_seqseq.h"
#include "aln_seqprofile.h"
#include "aln_profileprofile.h"
#define ALN_CONTROLLER_IMPORT
#include "aln_controller.h"
//static int aln_continue(struct aln_mem* m,struct aln_param* ap,int* path,int meet,int transition);
static int aln_continue(struct aln_mem* m,float input_states[],int old_cor[],int* path,int meet,int transition);
/*
 * One divide-and-conquer step of the linear-memory (Hirschberg-style)
 * alignment.  Splits the a-range at its midpoint, runs a forward pass over
 * the first half and a backward pass over the second half (as OpenMP tasks
 * when available), then asks the matching *_meetup routine where the two
 * passes meet in the b-range and with which state transition.  Unless only
 * the score is wanted, aln_continue() recurses on the two sub-problems.
 *
 * NOTE(review): the f[0]/b[0] fields a/ga/gb look like the classic
 * match / gap-in-a / gap-in-b DP states -- confirm against aln_struct.h.
 */
int aln_runner(struct aln_mem* m, int* path)
{
    float input_states[6];   /* caller's boundary DP states, saved for aln_continue() */
    int old_cor[5];          /* saved coordinates: starta, enda, startb, endb, mid */
    float score;
    int mid;
    int meet;                /* b-coordinate where forward and backward passes meet */
    int transition;          /* transition code chosen by the *_meetup routine */
    /* Save the boundary states; the passes below overwrite m->f[0]/m->b[0]. */
    input_states[0] = m->f[0].a;
    input_states[1] = m->f[0].ga;
    input_states[2] = m->f[0].gb;
    input_states[3] = m->b[0].a;
    input_states[4] = m->b[0].ga;
    input_states[5] = m->b[0].gb;
    /* Midpoint of the a-range; recursion splits here. */
    mid = ((m->enda - m->starta) / 2)+ m->starta;
    old_cor[0] = m->starta;
    old_cor[1] = m->enda;
    old_cor[2] = m->startb;
    old_cor[3] = m->endb;
    old_cor[4] = mid;
    /* Base cases: an empty a- or b-range needs no further alignment. */
    if(m->starta >= m->enda){
        return OK;//hirsch_path;
    }
    if(m->startb >= m->endb){
        return OK;///hirsch_path;
    }
    //fprintf(stderr,"Forward:%d-%d %d-%d\n",m->starta,m->enda,m->startb,m->endb);
    /* Forward pass covers [starta, mid]; the *_2 coordinates describe the
     * second half [mid, old enda] for the backward pass, so both tasks can
     * run concurrently on disjoint fields of m. */
    m->enda = mid;
    m->starta_2 = mid;
    m->enda_2 = old_cor[1];
    //fprintf(stderr,"Forward:%d-%d %d-%d\n",m->starta,m->enda,m->startb,m->endb);
    /* Dispatch on input kind: seq/seq, profile/profile, or seq/profile.
     * Tasks are only spawned for ranges long enough to amortize overhead. */
    if(m->seq1){
#ifdef HAVE_OPENMP
#pragma omp task shared(m) if(m->enda - m->starta > 1000)
#endif
        aln_seqseq_foward(m);
    }else if(m->prof2){
#ifdef HAVE_OPENMP
#pragma omp task shared(m)if(m->enda - m->starta > 1000)
#endif
        aln_profileprofile_foward(m);
    }else{
#ifdef HAVE_OPENMP
#pragma omp task shared(m)if(m->enda - m->starta > 1000)
#endif
        aln_seqprofile_foward(m);
    }
    /* CRITICAL */
    //m->starta = mid;
    //m->enda = old_cor[1];
    //fprintf(stderr,"Backward:%d-%d %d-%d\n",m->starta,m->enda,m->startb,m->endb);
    /* Backward pass over the second half; the taskwait ensures both passes
     * have finished before the meetup step combines their results. */
    if(m->seq1){
#ifdef HAVE_OPENMP
#pragma omp task shared(m) if(m->enda_2 - m->starta_2 > 1000)
#endif
        aln_seqseq_backward(m);
#ifdef HAVE_OPENMP
#pragma omp taskwait
#endif
        aln_seqseq_meetup(m,old_cor,&meet,&transition,&score);
    }else if(m->prof2){
#ifdef HAVE_OPENMP
#pragma omp task shared(m)if(m->enda_2 - m->starta_2 > 1000)
#endif
        aln_profileprofile_backward(m);
#ifdef HAVE_OPENMP
#pragma omp taskwait
#endif
        aln_profileprofile_meetup(m,old_cor,&meet,&transition,&score);
    }else{
#ifdef HAVE_OPENMP
#pragma omp task shared(m)if(m->enda_2 - m->starta_2 > 1000)
#endif
        aln_seqprofile_backward(m);
#ifdef HAVE_OPENMP
#pragma omp taskwait
#endif
        aln_seqprofile_meetup(m,old_cor,&meet,&transition,&score);
    }
    if(m->mode == ALN_MODE_SCORE_ONLY){
        /* Score-only mode: no traceback, just record the alignment score. */
        m->score = score;
    }else{
        /* Recurse on the two halves, threading the saved boundary states. */
        aln_continue(m, input_states,old_cor,path, meet, transition);
    }
    return OK;
}
/*
 * Recursion step of the divide-and-conquer alignment.  Given the meeting
 * point (meet) in the b-range and the state transition chosen there,
 * records any aligned positions in `path`, restores the saved boundary DP
 * states from input_states[0..2] (forward) and [3..5] (backward), sets the
 * sub-problem coordinates from old_cor, and calls aln_runner() on the left
 * and right halves.  The exact +/-1 offsets per case encode which cells
 * the transition already consumed and must not be revisited.
 *
 * NOTE(review): transition codes appear to be a->a=1, a->ga=2, a->gb=3,
 * ga->a=5, gb->gb=6, gb->a=7 (codes 4 and 8 unused here) -- confirm
 * against the *_meetup implementations.
 */
int aln_continue(struct aln_mem* m,float input_states[],int old_cor[],int* path,int meet,int transition)
{
    //fprintf(stderr,"Transition:%d at:%d\n",transition,c);
    //LOG_MSG("MAX: %f",max);
    //j = hirsch_path[0];
    switch(transition){
    case 1: //a -> a = 1
        /* Two aligned pairs at the split: (mid, meet) and (mid+1, meet+1). */
        path[old_cor[4]] = meet;
        path[old_cor[4]+1] = meet+1;
        // fprintf(stderr,"Aligning:%d-%d\n",old_cor[4],c);
        // fprintf(stderr,"Aligning:%d-%d\n",old_cor[4]+1,c+1);
        //foward:
        /* Left half: restore caller's forward states; backward boundary is
         * a fresh match state (a=0, gaps impossible). */
        m->f[0].a = input_states[0];
        m->f[0].ga = input_states[1];
        m->f[0].gb = input_states[2];
        m->b[0].a = 0.0F;
        m->b[0].ga = -FLT_MAX;
        m->b[0].gb = -FLT_MAX;
        // fprintf(stderr,"Using this for start:%d %d %d\n",m->f[0].a,m->f[0].ga,m->f[0].gb);
        m->starta = old_cor[0];
        m->enda = old_cor[4]-1;
        m->startb = old_cor[2];
        m->endb = meet-1;
        //fprintf(stderr,"Following first: %d what:%d-%d %d-%d\n",c-1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        //backward:
        /* Right half: fresh forward match state; restore caller's backward
         * states. */
        m->starta = old_cor[4]+1;
        m->enda = old_cor[1];
        m->startb = meet+1;
        m->endb = old_cor[3];
        m->f[0].a = 0.0F;
        m->f[0].ga = -FLT_MAX;
        m->f[0].gb = -FLT_MAX;
        m->b[0].a = input_states[3];
        m->b[0].ga = input_states[4];
        m->b[0].gb = input_states[5];
        //fprintf(stderr,"Following last: %d what:%d-%d %d-%d\n",c+1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        break;
    case 2:// a -> ga = 2
        /* One aligned pair at (mid, meet); the right half starts in a
         * gap-in-a state, so position mid is revisited (no +1 on starta). */
        path[old_cor[4]] = meet;
        // fprintf(stderr,"Aligning:%d-%d\n",old_cor[4],c);
        //foward:
        m->f[0].a = input_states[0];
        m->f[0].ga = input_states[1];
        m->f[0].gb = input_states[2];
        m->b[0].a = 0.0F;
        m->b[0].ga = -FLT_MAX;
        m->b[0].gb = -FLT_MAX;
        m->starta = old_cor[0];
        m->enda = old_cor[4]-1;
        m->startb = old_cor[2];
        m->endb = meet-1;
        //fprintf(stderr,"Following first: %d what:%d-%d %d-%d\n",c-1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        //backward:
        m->starta = old_cor[4];
        m->enda = old_cor[1];
        m->startb = meet+1;
        m->endb = old_cor[3];
        /* Start the right half in the ga state only. */
        m->f[0].a = -FLT_MAX;
        m->f[0].ga = 0.0F;
        m->f[0].gb = -FLT_MAX;
        m->b[0].a = input_states[3];
        m->b[0].ga = input_states[4];
        m->b[0].gb = input_states[5];
        //fprintf(stderr,"Following last: %d what:%d-%d %d-%d\n",c+1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        break;
    case 3:// a -> gb = 3
        /* One aligned pair at (mid, meet); the right half starts in a
         * gap-in-b state, so b-position meet is revisited (no +1 on startb). */
        path[old_cor[4]] = meet;
        // fprintf(stderr,"Aligning:%d-%d\n",old_cor[4],c);
        //foward:
        m->f[0].a = input_states[0];
        m->f[0].ga = input_states[1];
        m->f[0].gb = input_states[2];
        m->b[0].a = 0.0F;
        m->b[0].ga = -FLT_MAX;
        m->b[0].gb = -FLT_MAX;
        m->starta = old_cor[0];
        m->enda = old_cor[4]-1;
        m->startb = old_cor[2];
        m->endb = meet-1;
        //fprintf(stderr,"Following first: %d what:%d-%d %d-%d\n",c-1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        //backward:
        m->starta = old_cor[4]+1;
        m->enda = old_cor[1];
        m->startb = meet;
        m->endb = old_cor[3];
        /* Start the right half in the gb state only. */
        m->f[0].a = -FLT_MAX;
        m->f[0].ga = -FLT_MAX;
        m->f[0].gb = 0.0;
        m->b[0].a = input_states[3];
        m->b[0].ga = input_states[4];
        m->b[0].gb = input_states[5];
        //fprintf(stderr,"Following last: %d\n",c+1);
        aln_runner(m,path);
        break;
    case 5://ga -> a = 5
        /* Aligned pair at (mid+1, meet+1); the left half ends in a gap-in-a
         * state, so it keeps position mid (enda = mid, not mid-1). */
        path[old_cor[4]+1] = meet+1;
        // fprintf(stderr,"Aligning:%d-%d\n",old_cor[4]+1,c+1);
        //foward:
        m->f[0].a = input_states[0];
        m->f[0].ga = input_states[1];
        m->f[0].gb = input_states[2];
        /* Left half must end in the ga state. */
        m->b[0].a = -FLT_MAX;
        m->b[0].ga = 0.0F;
        m->b[0].gb = -FLT_MAX;
        m->starta = old_cor[0];
        m->enda = old_cor[4];
        m->startb = old_cor[2];
        m->endb = meet-1;
        //fprintf(stderr,"Following first: %d what:%d-%d %d-%d\n",c-1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        //backward:
        m->starta = old_cor[4]+1;
        m->enda = old_cor[1];
        m->startb = meet+1;
        m->endb = old_cor[3];
        m->f[0].a = 0.0F;
        m->f[0].ga = -FLT_MAX;
        m->f[0].gb = -FLT_MAX;
        m->b[0].a = input_states[3];
        m->b[0].ga = input_states[4];
        m->b[0].gb = input_states[5];
        //fprintf(stderr,"Following last: %d\n",c+1);
        aln_runner(m,path);
        break;
    case 6://gb->gb = 6;
        /* No aligned pair: both halves share b-position meet through the
         * gb state. */
        //foward:
        m->f[0].a = input_states[0];
        m->f[0].ga = input_states[1];
        m->f[0].gb = input_states[2];
        m->b[0].a = -FLT_MAX;
        m->b[0].ga = -FLT_MAX;
        m->b[0].gb = 0.0F;
        m->starta = old_cor[0];
        m->enda = old_cor[4]-1;
        m->startb = old_cor[2];
        m->endb = meet;
        //fprintf(stderr,"Following first: %d what:%d-%d %d-%d\n",c-1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        //backward:
        m->starta = old_cor[4]+1;
        m->enda = old_cor[1];
        m->startb = meet;
        m->endb = old_cor[3];
        m->f[0].a = -FLT_MAX;
        m->f[0].ga = -FLT_MAX;
        m->f[0].gb = 0.0F;
        m->b[0].a = input_states[3];
        m->b[0].ga = input_states[4];
        m->b[0].gb = input_states[5];
        //fprintf(stderr,"Following last: %d\n",c+
        aln_runner(m,path);
        break;
    case 7://gb->a = 7;
        /* Aligned pair at (mid+1, meet+1); left half ends in gb and keeps
         * b-position meet. */
        path[old_cor[4]+1] = meet+1;
        // fprintf(stderr,"Aligning:%d-%d\n",old_cor[4]+1,c+1);
        //foward:
        m->f[0].a = input_states[0];
        m->f[0].ga = input_states[1];
        m->f[0].gb = input_states[2];
        m->b[0].a = -FLT_MAX;
        m->b[0].ga = -FLT_MAX;
        m->b[0].gb = 0.0F;
        m->starta = old_cor[0];
        m->enda = old_cor[4]-1;
        m->startb = old_cor[2];
        m->endb = meet;
        //fprintf(stderr,"Following first: %d what:%d-%d %d-%d\n",c-1,m->starta,m->enda,m->startb,m->endb);
        aln_runner(m,path);
        //backward:
        m->starta = old_cor[4]+1;
        m->enda = old_cor[1];
        m->startb = meet+1;
        m->endb = old_cor[3];
        m->f[0].a = 0.0F;
        m->f[0].ga = -FLT_MAX;
        m->f[0].gb = -FLT_MAX;
        m->b[0].a = input_states[3];
        m->b[0].ga = input_states[4];
        m->b[0].gb = input_states[5];
        //fprintf(stderr,"Following last: %d\n",c+1);
        aln_runner(m,path);
        break;
    default:
        /* Unknown transition: nothing to record. */
        break;
    }
    return OK;
}
|
securezip_fmt_plug.c | /*
* JtR format to crack PKWARE's SecureZIP archives. The same archive format is
* used by "Directory Opus" software.
*
* See "APPNOTE-6.3.4.TXT" for more information about SecureZIP.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Big thanks goes to PKWARE for documenting the archive format, and 7-Zip
* project for implementing the specification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_securezip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_securezip);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "sha.h"
#include "aes.h"
#include "jumbo.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "securezip_common.h"
#include "memdbg.h"
#define FORMAT_LABEL "securezip"
#define FORMAT_NAME "PKWARE SecureZIP"
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#ifndef SHA1_SIZE
#define SHA1_SIZE 20
#endif
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int any_cracked, *cracked;
static size_t cracked_size;
static struct custom_salt *cur_salt;
// Allocate per-session buffers.  Under OpenMP the key batch is scaled by
// the thread count times OMP_SCALE so each thread gets plenty of work.
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    // One fixed-size plaintext slot per candidate key.
    saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
    any_cracked = 0;
    // Per-candidate "cracked" flags, zero-initialized.
    cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
    cracked = mem_calloc(cracked_size, 1);
}
// Release the buffers allocated in init().
static void done(void)
{
    MEM_FREE(saved_key);
    MEM_FREE(cracked);
}
// Remember the salt chosen by the cracker core for use in crypt_all().
static void set_salt(void *salt)
{
    cur_salt = salt;
}
// Store a NUL-terminated copy of the candidate, truncated to the slot size.
static void securezip_set_key(char *key, int index)
{
    strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
}
// Hand back the stored candidate unchanged.
static char *get_key(int index)
{
    return &saved_key[index][0];
}
// The KDF is not quite HMAC-SHA1: it hashes only the digest-XORed ipad/opad
// blocks (no inner message), producing 40 bytes of AES key material.
// Derives the key from the password, decrypts the ERD (Encrypted Random
// Data), and returns nonzero when the trailing padding check passes.
static int securezip_decrypt(struct custom_salt *cur_salt, char *password)
{
    unsigned char digest[SHA1_SIZE];
    unsigned char key[SHA1_SIZE * 2];   // two SHA-1 outputs = 40 bytes
    unsigned char buf[64];
    unsigned char ivec[16];
    unsigned char out[ERDLEN];
    SHA_CTX ctx;
    unsigned int i;
    AES_KEY aes_decrypt_key;
    // 1: digest = SHA1(password)
    SHA1_Init(&ctx);
    SHA1_Update(&ctx, password, strlen(password));
    SHA1_Final(digest, &ctx);
    // 2: key[0..19] = SHA1(digest XOR ipad-block of 0x36)
    memset(buf, 0x36, 64);
    for (i = 0; i < SHA1_SIZE; i++)
        buf[i] ^= digest[i];
    SHA1_Init(&ctx);
    SHA1_Update(&ctx, buf, 64);
    SHA1_Final(key, &ctx);
    // 3: key[20..39] = SHA1(digest XOR opad-block of 0x5c)
    memset(buf, 0x5c, 64);
    for (i = 0; i < SHA1_SIZE; i++)
        buf[i] ^= digest[i];
    SHA1_Init(&ctx);
    SHA1_Update(&ctx, buf, 64);
    SHA1_Final(key + SHA1_SIZE, &ctx);
    // Decrypt ERD with AES-CBC using the salt's IV and key size.
    AES_set_decrypt_key(key, cur_salt->bit_length, &aes_decrypt_key);
    memcpy(ivec, cur_salt->iv, 16);
    AES_cbc_encrypt(cur_salt->erd, out, cur_salt->erd_length, &aes_decrypt_key, ivec, AES_DECRYPT);
    // Check padding, 8 bytes out of 16 should be enough.
    return memcmp(out + cur_salt->erd_length - 16, "\x10\x10\x10\x10\x10\x10\x10\x10", 8) == 0;
}
// Run the KDF + ERD decryption for every queued candidate key,
// parallelized over candidates when OpenMP is available.
static int crypt_all(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index = 0;
    // Reset the per-candidate flags left over from the previous batch.
    if (any_cracked) {
        memset(cracked, 0, cracked_size);
        any_cracked = 0;
    }
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (index = 0; index < count; index++)
    {
        if (securezip_decrypt(cur_salt, saved_key[index])) {
            cracked[index] = 1;   // each thread writes a distinct slot
#ifdef _OPENMP
#pragma omp atomic
#endif
            any_cracked |= 1;     // shared flag, hence the atomic update
        }
    }
    return count;
}
// Did any candidate in this batch crack the salt?  (Set by crypt_all.)
static int cmp_all(void *binary, int count)
{
    return any_cracked ? 1 : 0;
}
// Per-candidate check: was this particular key flagged by crypt_all()?
static int cmp_one(void *binary, int index)
{
    return cracked[index] ? 1 : 0;
}
// The padding check in securezip_decrypt() is already exact, so there is
// nothing further to verify here.
static int cmp_exact(char *source, int index)
{
    return 1;
}
// Format descriptor registering this plugin with the JtR core: first the
// static parameters, then the method table.
struct fmt_main fmt_securezip = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,                          // minimum plaintext length
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP,
        { NULL },                   // no tunable costs
        { FORMAT_TAG },
        securezip_tests
    }, {
        init,
        done,
        fmt_default_reset,
        fmt_default_prepare,
        securezip_common_valid,
        fmt_default_split,
        fmt_default_binary,         // BINARY_SIZE is 0: nothing to compare
        securezip_common_get_salt,
        { NULL },
        fmt_default_source,
        {
            fmt_default_binary_hash
        },
        fmt_default_salt_hash,
        NULL,
        set_salt,
        securezip_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            fmt_default_get_hash
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
#endif /* plugin stanza */
|
ej1.c | #include <stdio.h>
#include <math.h>
#include <float.h>
#include <stdlib.h>
#include <omp.h>
#include "ctimer.h"
/*
 * Spawn an OpenMP parallel region and have every thread report its id and
 * the team size.
 *
 * Fixes: implicit-int `main` (invalid since C99) and a printf format
 * mismatch -- %u with signed int arguments is undefined behavior; use %d.
 */
int main(int argc, char **argv)
{
    int tid;
    int nthreads;
    (void)argc;
    (void)argv;
    #pragma omp parallel private(tid, nthreads)
    {
        nthreads = omp_get_num_threads();
        tid = omp_get_thread_num();
        printf("soy el thread %d de %d \n", tid, nthreads);
    }
    return 0;
}
omp50_task_depend_mtx.c | // RUN: %libomp-compile-and-run
// Tests OMP 5.0 task dependences "mutexinoutset", emulates compiler codegen
// Mutually exclusive tasks get same input dependency info array
//
// Task tree created:
// task0 task1
// \ / \
// task2 task5
// / \
// task3 task4
// / \
// task6 <-->task7 (these two are mutually exclusive)
// \ /
// task8
//
#include <stdio.h>
#include <omp.h>
#ifdef _WIN32
#include <windows.h>
#define mysleep(n) Sleep(n)
#else
#include <unistd.h>
#define mysleep(n) usleep((n)*1000)
#endif
static int checker = 0; // to check if two tasks run simultaneously
static int err = 0;
#ifndef DELAY
#define DELAY 100
#endif
// ---------------------------------------------------------------------------
// internal data to emulate compiler codegen
typedef int(*entry_t)(int, int**);
typedef struct DEP {
size_t addr;
size_t len;
int flags;
} dep;
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// Task entry used by the two emulated "mutexinoutset" tasks.  Increments
// the shared counter on entry and decrements on exit; if the counter is
// ever != 1 while inside, the two tasks overlapped and err is bumped.
int thunk(int gtid, int** pshareds) {
    int t = **pshareds;           // task tag passed through the shared var
    int th = omp_get_thread_num();
#pragma omp atomic
    ++checker;
    printf("task __%d, th %d\n", t, th);
    if (checker != 1) {           // another task is inside the mutex section
        err++;
        printf("Error1, checker %d != 1\n", checker);
    }
    mysleep(DELAY);               // widen the window to expose overlap
    if (checker != 1) {
        err++;
        printf("Error2, checker %d != 1\n", checker);
    }
#pragma omp atomic
    --checker;
    return 0;
}
#ifdef __cplusplus
extern "C" {
#endif
int __kmpc_global_thread_num(id*);
extern int** __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, entry_t rtn);
int
__kmpc_omp_task_with_deps(id *loc, int gtid, int **task, int nd, dep *dep_lst,
int nd_noalias, dep *noalias_dep_lst);
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
#ifdef __cplusplus
} // extern "C"
#endif
// End of internal data
// ---------------------------------------------------------------------------
// Builds the task tree from the header comment.  Tasks 0-5 and 8 are plain
// OpenMP tasks using regular in/out dependences; tasks 6 and 7 are created
// by hand through the emulated compiler entry points with dependence
// flag 4 ("mutexinoutset"), so the runtime must never run them
// concurrently -- thunk() detects overlap via the global 'checker'.
int main()
{
    int i1,i2,i3,i4;              // dependence objects only; never read
    omp_set_num_threads(2);
#pragma omp parallel
    {
#pragma omp single nowait
        {
            dep sdep[2];
            int **ptr;
            int gtid = __kmpc_global_thread_num(&loc);
            int t = omp_get_thread_num();
            // task0
#pragma omp task depend(in: i1, i2)
            { int th = omp_get_thread_num();
                printf("task 0_%d, th %d\n", t, th);
                mysleep(DELAY); }
            // task1
#pragma omp task depend(in: i1, i3)
            { int th = omp_get_thread_num();
                printf("task 1_%d, th %d\n", t, th);
                mysleep(DELAY); }
            // task2: waits on tasks 0/1 (out: i1)
#pragma omp task depend(in: i2) depend(out: i1)
            { int th = omp_get_thread_num();
                printf("task 2_%d, th %d\n", t, th);
                mysleep(DELAY); }
            // task3
#pragma omp task depend(in: i1)
            { int th = omp_get_thread_num();
                printf("task 3_%d, th %d\n", t, th);
                mysleep(DELAY); }
            // task4
#pragma omp task depend(out: i2)
            { int th = omp_get_thread_num();
                printf("task 4_%d, th %d\n", t, th);
                mysleep(DELAY+5); } // wait a bit longer than task 3
            // task5
#pragma omp task depend(out: i3)
            { int th = omp_get_thread_num();
                printf("task 5_%d, th %d\n", t, th);
                mysleep(DELAY); }
            // compiler codegen start
            // task1 (== task6 of the tree): both mutex tasks share sdep
            ptr = __kmpc_omp_task_alloc(&loc, gtid, 0, 28, 16, thunk);
            sdep[0].addr = (size_t)&i1;
            sdep[0].len = 0; // not used
            sdep[0].flags = 4; // mx
            sdep[1].addr = (size_t)&i4;
            sdep[1].len = 0; // not used
            sdep[1].flags = 4; // mx
            **ptr = t + 10; // init single shared variable
            __kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
            // task2 (== task7): identical dependence info array
            ptr = __kmpc_omp_task_alloc(&loc, gtid, 0, 28, 16, thunk);
            **ptr = t + 20; // init single shared variable
            __kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
            // compiler codegen end
            // task8: waits on both mutex tasks through i1
#pragma omp task depend(in: i1)
            { int th = omp_get_thread_num();
                printf("task 8_%d, th %d\n", t, th);
                mysleep(DELAY); }
        } // single
    } // parallel
    if (err == 0) {
        printf("passed\n");
        return 0;
    } else {
        printf("failed\n");
        return 1;
    }
}
|
omp_for_lastprivate.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int sum0;
#pragma omp threadprivate(sum0)
// Verify lastprivate(i0) on a statically scheduled, orphaned worksharing
// loop: after the region, i0 must hold the value written by the
// sequentially last iteration (i == LOOPCOUNT), and the threadprivate
// partial sums must reduce to the closed-form 1+2+...+LOOPCOUNT.
int test_omp_for_lastprivate()
{
    int sum = 0;
    int known_sum;
    int i0;
    i0 = -1;
#pragma omp parallel
    {
        sum0 = 0;                 // threadprivate partial sum
        { /* Begin of orphaned block */
            int i;
#pragma omp for schedule(static,7) lastprivate(i0)
            for (i = 1; i <= LOOPCOUNT; i++) {
                sum0 = sum0 + i;
                i0 = i;           // lastprivate: final value comes from i==LOOPCOUNT
            } /* end of for */
        } /* end of orphaned block */
#pragma omp critical
        {
            sum = sum + sum0;     // reduce partial sums under mutual exclusion
        } /* end of critical */
    } /* end of parallel */
    known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
    fprintf(stderr, "known_sum = %d , sum = %d\n",known_sum,sum);
    fprintf(stderr, "LOOPCOUNT = %d , i0 = %d\n",LOOPCOUNT,i0);
    return ((known_sum == sum) && (i0 == LOOPCOUNT));
}
// Repeat the check several times to shake out scheduling-dependent
// failures; the exit status is the number of failed repetitions.
int main()
{
    int num_failed = 0;
    for (int rep = 0; rep < REPETITIONS; rep++) {
        if (!test_omp_for_lastprivate())
            num_failed++;
    }
    return num_failed;
}
|
GB_unop__identity_bool_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_bool_fc32)
// op(A') function: GB (_unop_tran__identity_bool_fc32)
// C type: bool
// A type: GxB_FC32_t
// cast: bool cij = (crealf (aij) != 0) || (cimagf (aij) != 0)
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_bool_fc32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // cast complex -> bool: true iff either component is nonzero
            GxB_FC32_t aij = Ax [p] ;
            bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries absent from the bitmap
            GxB_FC32_t aij = Ax [p] ;
            bool z = (crealf (aij) != 0) || (cimagf (aij) != 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template does all the work,
// using the GB_CAST/GB_OP macros defined at the top of this file.
GrB_Info GB (_unop_tran__identity_bool_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,     // per-workspace transpose buffers
    const int64_t *restrict A_slice,   // how A is partitioned over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute result = x - y for struct timeval values, normalizing so that
 * result->tv_usec comes out non-negative.  NOTE: y is adjusted in place as
 * part of the carry computation (same contract as the classic glibc
 * example this follows).  Returns 1 when the difference is negative,
 * 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Push any surplus over one second from the usec field into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* The difference is negative exactly when x's adjusted seconds fall short. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs the order-4-neighborhood 25-point wave stencil
 * TESTS times and reports the best wall-clock time.
 *
 * Fixes over the original:
 *  - Nx/Ny/Nz/Nt were read uninitialized when not given on the command
 *    line (undefined behavior); sensible defaults are now provided.
 *  - roc2 was malloc'd twice (wrong size the first time) and the first
 *    allocation leaked; the redundant allocation is gone.
 *  - The init loops started at index 1, leaving plane/row/column 0
 *    uninitialized even though the stencil reads them (i-4 .. k-4 reach
 *    index 0); they now start at 0.
 *  - tile_size is freed before exit.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    // Interior size + 2*4 ghost layers per axis; defaults used when the
    // command line does not supply sizes.
    int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1]) + 8;
        Ny = atoi(argv[2]) + 8;
        Nz = atoi(argv[3]) + 8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // Two time planes of the field plus the wave-speed coefficients,
    // each an Nz x Ny x Nx array-of-pointers structure.
    double ****A = (double ****) malloc(sizeof(double***) * 2);
    A[0] = (double ***) malloc(sizeof(double**) * Nz);
    A[1] = (double ***) malloc(sizeof(double**) * Nz);
    double ***roc2 = (double ***) malloc(sizeof(double**) * Nz);
    for (i = 0; i < Nz; i++) {
        A[0][i] = (double**) malloc(sizeof(double*) * Ny);
        A[1][i] = (double**) malloc(sizeof(double*) * Ny);
        roc2[i] = (double**) malloc(sizeof(double*) * Ny);
        for (j = 0; j < Ny; j++) {
            A[0][i][j] = (double*) malloc(sizeof(double) * Nx);
            A[1][i][j] = (double*) malloc(sizeof(double) * Nx);
            roc2[i][j] = (double*) malloc(sizeof(double) * Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 24;
    tile_size[1] = 24;
    tile_size[2] = 24;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff = 1.e100;
    const int BASE = 1024;

    // Deterministic pseudo-random initial data; start at 0 so the ghost
    // cells the stencil reads are initialized too.
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    // Finite-difference coefficients of the 25-point operator.
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for (test = 0; test < TESTS; test++) {
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                            coef0* A[t%2][i  ][j  ][k  ] +
                            coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                                   A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                                   A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                            coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                                   A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                                   A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                            coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                                   A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                                   A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                            coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                                   A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                                   A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    free(A);
    free(tile_size);
    return 0;
}
|
kmp_aff_disable_hwloc.c | // RUN: %libomp-compile && env KMP_AFFINITY=disabled KMP_TOPOLOGY_METHOD=hwloc %libomp-run
// REQUIRES: hwloc
#include <stdio.h>
#include <stdlib.h>
// Test will assert() without fix
// Regression check: merely creating a parallel region must not trip the
// runtime's internal assert when KMP_AFFINITY=disabled is combined with
// KMP_TOPOLOGY_METHOD=hwloc.  Always reports success if we get this far.
int test_affinity_disabled_plus_hwloc() {
  #pragma omp parallel
  {
  }
  return 1;
}
// Exit 0 on success, 1 on failure.  (The unused locals i and j from the
// original have been removed.)
int main(int argc, char **argv) {
  int failed = 0;
  if (!test_affinity_disabled_plus_hwloc()) {
    failed = 1;
  }
  return failed;
}
|
wpapsk_fmt_plug.c | /*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz at openwall dot net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Code is based on Aircrack-ng source
*
* SSE2 code enhancement, Jim Fougeron, Jan, 2013.
* Also removed oSSL EVP code and coded what it does (which is simple), inline.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_wpapsk;
#elif FMT_REGISTERS_H
john_register_one(&fmt_wpapsk);
#else
#include <string.h>
#include <assert.h>
#include "arch.h"
#if !ARCH_LITTLE_ENDIAN
#undef SIMD_COEF_32
#undef SIMD_PARA_SHA1
#endif
#include "simd-intrinsics.h"
#include "formats.h"
#include "common.h"
#include "misc.h"
//#define WPAPSK_DEBUG
#include "wpapsk.h"
#include "sha.h"
// if this is uncommented, we will force building of SSE to be 'off'. It is
// useful in testing but 99.9% of the builds should have this undef commented out.
//#undef SIMD_COEF_32
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#ifdef _OPENMP
#include <omp.h>
#endif
#else
#define NBKEYS 1
#ifdef _OPENMP
#include <omp.h>
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "wpapsk"
#if !HAVE_OPENSSL_CMAC_H
#ifdef _MSC_VER
#pragma message("Notice: WPAPSK (CPU) format built without support for 802.11w. Upgrade your OpenSSL.")
#else
#warning Notice: WPAPSK (CPU) format built without support for 802.11w. Upgrade your OpenSSL.
#endif
#define FORMAT_NAME "WPA/WPA2 PSK"
#else
#define FORMAT_NAME "WPA/WPA2/PMF PSK"
#endif
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
extern wpapsk_password *inbuffer;
extern wpapsk_hash *outbuffer;
extern wpapsk_salt currentsalt;
extern hccap_t hccap;
extern mic_t *mic;
#ifdef SIMD_COEF_32
// Ok, now we have our MMX/SSE2/intr buffer.
// this version works properly for MMX, SSE2 (.S) and SSE2 intrinsic.
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 ) //for endianity conversion
static unsigned char (*sse_hash1);
static unsigned char (*sse_crypt1);
static unsigned char (*sse_crypt2);
static unsigned char (*sse_crypt);
#endif
// Allocate per-session buffers and, for SIMD builds, pre-format the
// interleaved SHA-1 work buffers used by the PBKDF2 inner loop.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    // Scale the key batch by the OpenMP thread count.
    int omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    // The loader depends on hccap_t having the exact on-disk layout.
    assert(sizeof(hccap_t) == HCCAP_SIZE);
    inbuffer = mem_alloc(sizeof(*inbuffer) *
                         self->params.max_keys_per_crypt);
    outbuffer = mem_alloc(sizeof(*outbuffer) *
                          self->params.max_keys_per_crypt);
    mic = mem_alloc(sizeof(*mic) *
                    self->params.max_keys_per_crypt);
#if defined (SIMD_COEF_32)
    // SIMD scratch buffers, aligned for vector loads/stores.
    sse_hash1 = mem_calloc_align(self->params.max_keys_per_crypt,
                                 SHA_BUF_SIZ * 4 * sizeof(*sse_hash1),
                                 MEM_ALIGN_SIMD);
    sse_crypt1 = mem_calloc_align(self->params.max_keys_per_crypt,
                                  20 * sizeof(*sse_crypt1), MEM_ALIGN_SIMD);
    sse_crypt2 = mem_calloc_align(self->params.max_keys_per_crypt,
                                  20 * sizeof(*sse_crypt2), MEM_ALIGN_SIMD);
    sse_crypt = mem_calloc_align(self->params.max_keys_per_crypt,
                                 20 * sizeof(*sse_crypt), MEM_ALIGN_SIMD);
    {
        int index;
        for (index = 0; index < self->params.max_keys_per_crypt; ++index) {
            // set the length of all hash1 SSE buffer to 64+20 * 8 bits. The 64 is for the ipad/opad,
            // the 20 is for the length of the SHA1 buffer that also gets into each crypt.
            // Works for SSE2i and SSE2
            ((unsigned int *)sse_hash1)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (84<<3); // all encrypts are 64+20 bytes.
            sse_hash1[GETPOS(20,index)] = 0x80;  // SHA-1 padding bit after the 20-byte digest
        }
    }
    // From this point on, we ONLY touch the first 20 bytes (* SIMD_COEF_32) of each buffer 'block'. If !SHA_PARA', then only the first
    // block is written to after this, if there are more that one SHA_PARA, then the start of each para block will be updated inside the inner loop.
#endif
    /*
     * Zeroize the lengths in case crypt_all() is called with some keys still
     * not set. This may happen during self-tests.
     */
    {
        int i;
        for (i = 0; i < self->params.max_keys_per_crypt; i++)
            inbuffer[i].length = 0;
    }
}
/* Release everything init() allocated, in reverse order of allocation. */
static void done(void)
{
#ifdef SIMD_COEF_32
	MEM_FREE(sse_crypt);
	MEM_FREE(sse_crypt2);
	MEM_FREE(sse_crypt1);
	MEM_FREE(sse_hash1);
#endif
	MEM_FREE(mic);
	MEM_FREE(outbuffer);
	MEM_FREE(inbuffer);
}
#ifndef SIMD_COEF_32
/*
 * Scalar WPA-PSK key derivation: PBKDF2-HMAC-SHA1(passphrase, ESSID,
 * 4096 iterations, 32 output bytes) for each of `count` candidates.
 * in[j].v/.length is the candidate passphrase (<= 64 bytes, so the HMAC
 * key fits one SHA-1 block); salt->salt/.length is the ESSID; out[j]
 * receives the 32-byte PMK.  Two PBKDF2 blocks (INT(1), INT(2)) produce
 * 20 + 12 of the 32 bytes.
 */
static MAYBE_INLINE void wpapsk_cpu(int count,
	wpapsk_password * in, wpapsk_hash * out, wpapsk_salt * salt)
{
	int j;
	int slen = salt->length + 4; /* ESSID plus the 4-byte big-endian block index */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(j) shared(count, slen, salt, in, out)
#endif
	for (j = 0; j < count; j++) {
		int i, k;
		unsigned char essid[32 + 4];
		union {
			unsigned char c[64];
			uint32_t i[16];
		} buffer;
		union {
			unsigned char c[40];
			uint32_t i[10];
		} outbuf;
		SHA_CTX ctx_ipad;
		SHA_CTX ctx_opad;
		SHA_CTX sha1_ctx;
		memset(essid, 0, 32 + 4);
		memcpy(essid, salt->salt, salt->length);
		memset(buffer.c, 0, 64);
		memcpy(buffer.c, in[j].v, in[j].length);
		/* Precompute the HMAC inner and outer pad states once; every
		 * iteration below restarts from a memcpy of these contexts. */
		SHA1_Init(&ctx_ipad);
		SHA1_Init(&ctx_opad);
		for (i = 0; i < 16; i++)
			buffer.i[i] ^= 0x36363636; /* key ^ ipad */
		SHA1_Update(&ctx_ipad, buffer.c, 64);
		for (i = 0; i < 16; i++)
			buffer.i[i] ^= 0x6a6a6a6a; /* 0x36 ^ 0x6a == 0x5c, i.e. key ^ opad */
		SHA1_Update(&ctx_opad, buffer.c, 64);
		/* PBKDF2 block 1: U1 = HMAC(P, S || INT(1)) */
		essid[slen - 1] = 1;
		memcpy(&sha1_ctx, &ctx_ipad, sizeof(sha1_ctx));
		SHA1_Update(&sha1_ctx, essid, slen);
		SHA1_Final(outbuf.c, &sha1_ctx);
		memcpy(&sha1_ctx, &ctx_opad, sizeof(sha1_ctx));
		SHA1_Update(&sha1_ctx, outbuf.c, 20);
		SHA1_Final(outbuf.c, &sha1_ctx);
		memcpy(buffer.c, outbuf.c, 20);
		/* Remaining 4095 iterations: U_i = HMAC(P, U_{i-1}), T1 ^= U_i */
		for (i = 1; i < 4096; i++) {
			memcpy(&sha1_ctx, &ctx_ipad, sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, buffer.c, 20);
			SHA1_Final(buffer.c, &sha1_ctx);
			memcpy(&sha1_ctx, &ctx_opad, sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, buffer.c, 20);
			SHA1_Final(buffer.c, &sha1_ctx);
			for (k = 0; k < 5; k++)
				outbuf.i[k] ^= buffer.i[k];
		}
		/* PBKDF2 block 2 (INT(2)) supplies the last 12 of the 32 bytes. */
		essid[slen - 1] = 2;
		memcpy(&sha1_ctx, &ctx_ipad, sizeof(sha1_ctx));
		SHA1_Update(&sha1_ctx, essid, slen);
		SHA1_Final(&outbuf.c[20], &sha1_ctx);
		memcpy(&sha1_ctx, &ctx_opad, sizeof(sha1_ctx));
		SHA1_Update(&sha1_ctx, &outbuf.c[20], 20);
		SHA1_Final(&outbuf.c[20], &sha1_ctx);
		memcpy(buffer.c, &outbuf.c[20], 20);
		for (i = 1; i < 4096; i++) {
			memcpy(&sha1_ctx, &ctx_ipad, sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, buffer.c, 20);
			SHA1_Final(buffer.c, &sha1_ctx);
			memcpy(&sha1_ctx, &ctx_opad, sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, buffer.c, 20);
			SHA1_Final(buffer.c, &sha1_ctx);
			/* only words 5..7 of the 2nd block are kept (32-byte PMK) */
			for (k = 5; k < 8; k++)
				outbuf.i[k] ^= buffer.i[k - 5];
		}
		memcpy(&out[j], outbuf.c, 32);
	}
}
#else
/*
 * SIMD WPA-PSK key derivation: same PBKDF2-HMAC-SHA1 as wpapsk_cpu(), but
 * the 4095-iteration inner loops run NBKEYS candidates at once through
 * SIMDSHA1body().  The first/last HMAC of each PBKDF2 block is still done
 * with the scalar OpenSSL-style SHA1_* calls, with the flat results
 * transposed into/out of the interleaved SIMD buffers (sse_crypt1/2 hold
 * the per-lane ipad/opad states, sse_hash1 the working digests).
 */
static MAYBE_INLINE void wpapsk_sse(int count, wpapsk_password * in, wpapsk_hash * out, wpapsk_salt * salt)
{
	int t; // thread count
	int slen = salt->length + 4; /* ESSID + 4-byte block index */
	int loops = (count+NBKEYS-1) / NBKEYS; /* round up to whole SIMD groups */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(count, slen, salt, in, out, loops, sse_crypt1, sse_crypt2, sse_hash1)
#endif
	for (t = 0; t < loops; t++) {
		unsigned int i, k, j;
		unsigned char essid[32 + 4];
		union {
			unsigned char c[64];
			uint32_t i[16];
		} buffer[NBKEYS];
		union {
			unsigned char c[40];
			uint32_t i[10];
		} outbuf[NBKEYS];
		SHA_CTX ctx_ipad[NBKEYS];
		SHA_CTX ctx_opad[NBKEYS];
		SHA_CTX sha1_ctx;
		unsigned int *i1, *i2, *o1;
		unsigned char *t_sse_crypt1, *t_sse_crypt2, *t_sse_hash1;
		// All pointers get their offset for this thread here. No further offsetting below.
		t_sse_crypt1 = &sse_crypt1[t * NBKEYS * 20];
		t_sse_crypt2 = &sse_crypt2[t * NBKEYS * 20];
		t_sse_hash1 = &sse_hash1[t * NBKEYS * SHA_BUF_SIZ * 4];
		i1 = (unsigned int*)t_sse_crypt1;
		i2 = (unsigned int*)t_sse_crypt2;
		o1 = (unsigned int*)t_sse_hash1;
		memset(essid, 0, 32 + 4);
		memcpy(essid, salt->salt, salt->length);
		/* Per-lane setup: HMAC pads and PBKDF2 block 1 first iteration. */
		for (j = 0; j < NBKEYS; ++j) {
			memcpy(buffer[j].c, in[t*NBKEYS+j].v, in[t*NBKEYS+j].length);
			memset(&buffer[j].c[in[t*NBKEYS+j].length], 0, 64-in[t*NBKEYS+j].length);
			SHA1_Init(&ctx_ipad[j]);
			SHA1_Init(&ctx_opad[j]);
			for (i = 0; i < 16; i++)
				buffer[j].i[i] ^= 0x36363636; /* key ^ ipad */
			SHA1_Update(&ctx_ipad[j], buffer[j].c, 64);
			for (i = 0; i < 16; i++)
				buffer[j].i[i] ^= 0x6a6a6a6a; /* 0x36 ^ 0x6a == 0x5c: key ^ opad */
			SHA1_Update(&ctx_opad[j], buffer[j].c, 64);
			// we memcopy from flat into SIMD_COEF_32 output buffer's (our 'temp' ctx buffer).
			// This data will NOT need to be BE swapped (it already IS BE swapped).
			i1[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+0*SIMD_COEF_32] = ctx_ipad[j].h0;
			i1[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+1*SIMD_COEF_32] = ctx_ipad[j].h1;
			i1[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+2*SIMD_COEF_32] = ctx_ipad[j].h2;
			i1[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+3*SIMD_COEF_32] = ctx_ipad[j].h3;
			i1[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+4*SIMD_COEF_32] = ctx_ipad[j].h4;
			i2[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+0*SIMD_COEF_32] = ctx_opad[j].h0;
			i2[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+1*SIMD_COEF_32] = ctx_opad[j].h1;
			i2[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+2*SIMD_COEF_32] = ctx_opad[j].h2;
			i2[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+3*SIMD_COEF_32] = ctx_opad[j].h3;
			i2[(j/SIMD_COEF_32)*SIMD_COEF_32*5+(j&(SIMD_COEF_32-1))+4*SIMD_COEF_32] = ctx_opad[j].h4;
			/* U1 of PBKDF2 block 1 (scalar). */
			essid[slen - 1] = 1;
			memcpy(&sha1_ctx, &ctx_ipad[j], sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, essid, slen);
			SHA1_Final(outbuf[j].c, &sha1_ctx);
			memcpy(&sha1_ctx, &ctx_opad[j], sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, outbuf[j].c, SHA_DIGEST_LENGTH);
			SHA1_Final(outbuf[j].c, &sha1_ctx);
			// memcpy(buffer[j].c, &outbuf[j], 20);
			// now convert this from flat into SIMD_COEF_32 buffers. (same as the memcpy() commented out in the last line)
			// Also, perform the 'first' ^= into the crypt buffer. NOTE, we are doing that in BE format
			// so we will need to 'undo' that in the end.
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+0*SIMD_COEF_32] = outbuf[j].i[0] = sha1_ctx.h0;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+1*SIMD_COEF_32] = outbuf[j].i[1] = sha1_ctx.h1;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+2*SIMD_COEF_32] = outbuf[j].i[2] = sha1_ctx.h2;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+3*SIMD_COEF_32] = outbuf[j].i[3] = sha1_ctx.h3;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+4*SIMD_COEF_32] = outbuf[j].i[4] = sha1_ctx.h4;
		}
		/* SIMD inner loop for block 1: U_i = HMAC(P, U_{i-1}), T1 ^= U_i. */
		for (i = 1; i < 4096; i++) {
			SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt1, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
			SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt2, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
			for (j = 0; j < NBKEYS; j++) {
				unsigned *p = &((unsigned int*)t_sse_hash1)[(((j/SIMD_COEF_32)*SHA_BUF_SIZ)*SIMD_COEF_32) + (j&(SIMD_COEF_32-1))];
				for (k = 0; k < 5; k++)
					outbuf[j].i[k] ^= p[(k*SIMD_COEF_32)];
			}
		}
		/* PBKDF2 block 2: same pattern, only 12 output bytes are kept. */
		essid[slen - 1] = 2;
		for (j = 0; j < NBKEYS; ++j) {
			memcpy(&sha1_ctx, &ctx_ipad[j], sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, essid, slen);
			SHA1_Final(&outbuf[j].c[20], &sha1_ctx);
			memcpy(&sha1_ctx, &ctx_opad[j], sizeof(sha1_ctx));
			SHA1_Update(&sha1_ctx, &outbuf[j].c[20], 20);
			SHA1_Final(&outbuf[j].c[20], &sha1_ctx);
			// memcpy(&buffer[j], &outbuf[j].c[20], 20);
			// now convert this from flat into SIMD_COEF_32 buffers. (same as the memcpy() commented out in the last line)
			// Also, perform the 'first' ^= into the crypt buffer. NOTE, we are doing that in BE format
			// so we will need to 'undo' that in the end. (only 3 dwords of the 2nd block outbuf are worked with).
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+0*SIMD_COEF_32] = outbuf[j].i[5] = sha1_ctx.h0;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+1*SIMD_COEF_32] = outbuf[j].i[6] = sha1_ctx.h1;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+2*SIMD_COEF_32] = outbuf[j].i[7] = sha1_ctx.h2;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+3*SIMD_COEF_32] = sha1_ctx.h3;
			o1[(j/SIMD_COEF_32)*SIMD_COEF_32*SHA_BUF_SIZ+(j&(SIMD_COEF_32-1))+4*SIMD_COEF_32] = sha1_ctx.h4;
		}
		for (i = 1; i < 4096; i++) {
			SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt1, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
			SIMDSHA1body((unsigned int*)t_sse_hash1, (unsigned int*)t_sse_hash1, (unsigned int*)t_sse_crypt2, SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
			for (j = 0; j < NBKEYS; j++) {
				unsigned *p = &((unsigned int*)t_sse_hash1)[(((j/SIMD_COEF_32)*SHA_BUF_SIZ)*SIMD_COEF_32) + (j&(SIMD_COEF_32-1))];
				for (k = 5; k < 8; k++)
					outbuf[j].i[k] ^= p[((k-5)*SIMD_COEF_32)];
			}
		}
		for (j = 0; j < NBKEYS; ++j) {
			// the BE() convert should be done in binary, BUT since we use 'common' code for
			// get_binary(), which is shared between CPU and OpenCL, we have to do it here.
			memcpy(out[t*NBKEYS+j].v, outbuf[j].c, 32);
			alter_endianity_to_BE(out[t*NBKEYS+j].v,8);
		}
	}
}
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
extern volatile int bench_running;
if (new_keys || strcmp(last_ssid, hccap.essid) || bench_running) {
#ifndef SIMD_COEF_32
wpapsk_cpu(count, inbuffer, outbuffer, ¤tsalt);
#else
wpapsk_sse(count, inbuffer, outbuffer, ¤tsalt);
#endif
new_keys = 0;
strcpy(last_ssid, hccap.essid);
}
wpapsk_postprocess(count);
return count;
}
/* John the Ripper format descriptor wiring the methods above together. */
struct fmt_main fmt_wpapsk = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		8, /* minimum plaintext length — NOTE(review): per fmt_params layout this
		      is plaintext_min_length (WPA passphrases are >= 8 chars); confirm
		      against the fmt_main definition in this tree */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_OMP,
		{ /* tunable cost descriptions */
#if HAVE_OPENSSL_CMAC_H
			"key version [1:WPA 2:WPA2 3:802.11w]"
#else
			"key version [1:WPA 2:WPA2]"
#endif
		},
		{ FORMAT_TAG },
		tests
	},
	{ /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ /* tunable cost accessors */
			get_keyver,
		},
		fmt_default_source,
		{
			binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		salt_compare,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
// Elementwise softmax finalizer: exp(x) divided by the precomputed row sum.
// Overloaded on the input precision so float inputs use expf and double
// inputs use exp.
struct softmax_fwd {
  // Single-precision input.
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float x, AType denom) {
    return AType(expf(x) / denom);
  }
  // Double-precision input.
  template<typename AType>
  MSHADOW_XINLINE static AType Map(double x, AType denom) {
    return AType(exp(x) / denom);
  }
};
// Elementwise log-softmax finalizer: x - log(sum).  The overload set picks
// logf vs log based on the accumulator precision of the row sum.
struct log_softmax_fwd {
  // Single-precision sum.
  template<typename DType>
  MSHADOW_XINLINE static float Map(DType x, float sum) {
    return x - logf(sum);
  }
  // Double-precision sum.
  template<typename DType>
  MSHADOW_XINLINE static double Map(DType x, double sum) {
    return x - log(sum);
  }
};
/*
 * CPU softmax along `axis` of `in` (shape `shape`), written to `out`.
 * OP finalizes each element (softmax_fwd or log_softmax_fwd); `negate`
 * flips the sign of every input read; AType is the accumulation type for
 * the row sum; `temperature` divides the max-shifted logits, with a
 * dedicated fast path for the common temperature == 1.0 case.
 * If `length` is non-null it gives a per-row valid length: elements past
 * it are ignored for max/sum and their outputs are written as 0.
 * Numerically stabilized by subtracting the row maximum before exp.
 */
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const DType temperature) {
  index_t M = shape[axis];        // softmax-axis extent
  index_t N = shape.Size()/M;     // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];      // element stride along the softmax axis
  if (length == nullptr) {
#pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      // Pass 1: row maximum (for numerical stability).
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < M; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        // Pass 2: sum of exp; pass 3: finalize each element via OP.
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  } else {
    // Masked variant: only the first length[i] elements of each row count.
#pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t len = static_cast<index_t>(length[i]);
      index_t base = unravel_dot(i, sshape, stride);
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < len; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      // Elements past the valid length are zeroed in the output.
      for (index_t j = len; j < M; ++j) {
        out[base + j*sa] = OType(0.0f);
      }
      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  }
}
// Backward finalizer for softmax: given the output y, the incoming gradient
// and the row-wide reduction `total`, produce y * (grad - total).
struct softmax_bwd {
  template<typename DType, typename AType>
  MSHADOW_XINLINE static AType Map(DType grad, DType y, AType total) {
    return AType(y * (grad - total));
  }
};
// Backward finalizer for log-softmax: grad - exp(y) * total, where y is the
// log-softmax output.  Overloads select expf vs exp by input precision.
struct log_softmax_bwd {
  // Single-precision inputs.
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float grad, float y, AType total) {
    return AType(grad - expf(y) * total);
  }
  // Double-precision inputs.
  template<typename AType>
  MSHADOW_XINLINE static AType Map(double grad, double y, AType total) {
    return AType(grad - exp(y) * total);
  }
};
/*
 * CPU softmax gradient along `axis`.  OP1 combines (ograd, out) into the
 * per-row reduction `sum`; OP2 finalizes each element from (ograd, out,
 * sum).  `negate` flips the sign of the result, Req selects the write mode
 * for KERNEL_ASSIGN, and `temperature` divides the finalized value (fast
 * path for temperature == 1.0).  With a non-null `length`, only the first
 * length[i] elements contribute to the reduction and positions past it get
 * a zero gradient.
 */
template<typename OP1, typename OP2, int Req, bool negate,
         typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];       // softmax-axis extent
  index_t N = shape.Size()/M;    // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];     // element stride along the softmax axis
  if (length != nullptr) {
    // Masked variant.
#pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);
      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          // Zero gradient beyond the valid length.
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    // Unmasked variant: every element of the row participates.
#pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}
#ifdef __CUDACC__
/*
 * Generic CUDA softmax kernel: one thread block per row (blockIdx.x), up to
 * 2^x_bits threads cooperating via a shared-memory tree reduction.
 * Three phases separated by barriers: row max, sum of exp((x-max)/T), then
 * elementwise finalization via OP.  With a non-null `length`, elements past
 * the row's valid length are excluded from the reductions and written as 0.
 */
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
                                       index_t M, int axis, Shape<ndim> sshape,
                                       Shape<ndim> stride, const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];          // reduction scratch, one slot per thread
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
  // Phase 1: block-wide maximum of the (optionally negated) inputs.
  red::maximum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::maximum, x_bits>(smem);
  __syncthreads();
  DType smax = smem[0];
  __syncthreads();                         // smem is about to be reused for the sum
  // Phase 2: block-wide sum of exp((val - max) / temperature).
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < len; i += x_size) {
    val = negate ? -in[base + i*sa]:in[base + i*sa];
    smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Phase 3: finalize every element; masked-out positions become 0.
  for (index_t i = x; i < M; i += x_size) {
    val = negate ? -in[base + i*sa] : in[base + i*sa];
    out[base + i*sa] =
      (i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
  }
}
const int softmax_threads_per_block = 512;
/*
 * Optimized CUDA softmax for contiguous rows (stride-1 axis) that fit in a
 * 20 kB shared-memory staging buffer.  Rows are loaded once with wide LType
 * loads, all three passes (max, sum, finalize) run against shared memory,
 * and results are written back with wide stores.  Several rows can share a
 * block (rows_per_block); each row gets threads_per_row threads.  Partial
 * reductions use a shared-memory tree down to warp size, then warp_reduce.
 */
template<typename OP, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
                                               const index_t M, const double temperature,
                                               const int rows_per_block, const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  size_t base = my_row * row_length;
  // Stage the whole row into shared memory with vectorized loads.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
  __syncthreads();
  // Pass 1: per-thread max, then tree + warp reduction into scratch.
  DType my_max_value;
  red::maximum::SetInitValue(my_max_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
  }
  scratch[threadIdx.x] = my_max_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return ::max(x, y); });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  // Every thread of the row reads the row leader's slot.
  DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Pass 2: sum of exp((val - max) / temperature), same reduction shape.
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Pass 3: finalize in place in shared memory; masked positions become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                         DType(0.0f);
  }
  __syncthreads();
  // Vectorized write-back of the finalized row.
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}
/*
 * GPU softmax launcher.  Dispatches to the shared-memory-staged
 * softmax_stride1_compute_kernel when the softmax axis is contiguous, the
 * row fits the 20 kB staging buffer, and in/out dtypes match (required for
 * the in-place shared-memory finalize); otherwise falls back to the generic
 * one-block-per-row softmax_compute_kernel.
 */
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;   // 128 threads per block in the generic kernel
  index_t M = shape[axis];
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    // Vector load width chosen from the row's byte size/alignment.
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // Guard against LType narrower than DType (invalid instantiation).
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_compute_kernel<OP, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
  } else {
    softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        in, out, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
  }
}
/*
 * Optimized CUDA softmax gradient for contiguous rows fitting half of the
 * 20 kB staging buffer: both `out` and `ograd` rows are staged into shared
 * memory (out at offset 0, ograd at offset row_length, i.e. M elements in
 * DType units), the OP1 reduction and OP2 finalize run from shared memory,
 * and the gradient row is written back with wide stores.
 */
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                            DType *igrad, const IType *length,
                                            const index_t M,
                                            const double temperature,
                                            const int rows_per_block,
                                            const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  size_t base = my_row * row_length;
  // Stage out then ograd for this row, back to back, with wide loads.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  // row[0..M) is out; row[M..2M) is ograd (DType view of the staging area).
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  __syncthreads();
  // OP1 reduction over the valid prefix: tree + warp reduce into scratch.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Finalize each gradient element in place; masked positions become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val =
      negate ?
      -OP2::Map(row[i + M], row[i], ssum) :
      OP2::Map(row[i + M], row[i], ssum);
    row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
                         DType(0.0f);
    if (Req == kAddTo) {
      // kAddTo accumulates onto the existing gradient read from global memory.
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  // Wide write-back of the finalized gradient row (first half of staging).
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}
/*
 * Generic CUDA softmax gradient kernel: one block per row, shared-memory
 * tree reduction of OP1(ograd, out) over the valid prefix, then an OP2
 * finalize per element divided by the temperature.  Positions past the
 * row's valid length get a zero gradient.
 */
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                    const IType *length, index_t M, int axis,
                                    Shape<ndim> sshape, Shape<ndim> stride,
                                    const double temperature) {
  const unsigned x_size = 1 << x_bits;
  __shared__ AType smem[x_size];   // reduction scratch, one slot per thread
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
  // Block-wide sum of OP1(ograd, out) over the valid elements.
  red::sum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Elementwise finalize; KERNEL_ASSIGN honors the Req write mode.
  DType final_result;
  for (index_t i = x; i < M; i += x_size) {
    final_result =
      negate ?
      -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
      OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
    final_result = (i < len) ? final_result : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
  }
}
/*
 * GPU softmax-gradient launcher.  Mirrors the forward launcher: uses the
 * shared-memory-staged stride-1 kernel when the axis is contiguous, dtypes
 * match, and the row fits half the 20 kB staging buffer (both out and
 * ograd must be staged); otherwise the generic one-block-per-row kernel.
 */
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape, int axis,
                        const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;   // 128 threads per block in the generic kernel
  index_t M = shape[axis];
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  // Need to store both out and ograd, so M can be only half compared to
  // forward pass.
  const size_t max_opt_M = 20 * 1024 / DSize / 2;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // Guard against LType narrower than DType (invalid instantiation).
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          out, ograd, igrad, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
  } else {
    softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
  }
}
#endif
} // namespace mxnet_op
/* Operator parameters for softmax/softmin/log_softmax. */
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;                           // axis to normalize over (default -1: last)
  dmlc::optional<double> temperature; // optional softmax temperature
  dmlc::optional<int> dtype;          // optional forced output dtype
  dmlc::optional<bool> use_length;    // treat 2nd input as per-row valid length
  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }
  // Equality over all fields; used for e.g. parameter-based caching.
  bool operator==(const SoftmaxParam& other) const {
    return this->axis == other.axis &&
           this->temperature == other.temperature &&
           this->dtype == other.dtype &&
           this->use_length == other.use_length;
  }
};
// True when the operator was given an explicit, valid output dtype.
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& p = nnvm::get<SoftmaxParam>(attrs.parsed);
  if (!p.dtype.has_value()) {
    return false;
  }
  return p.dtype.value() != -1;
}
// True when the operator expects a per-row length input (mask); the
// use_length parameter defaults to optional<bool>(false), so value() is safe.
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  return nnvm::get<SoftmaxParam>(attrs.parsed).use_length.value();
}
/*
 * Dtype inference for softmax.  When an explicit output dtype is requested
 * the output is pinned to it and the input unifies with the output;
 * otherwise input and output share a single dtype.  A second input slot
 * (the length mask) is expected iff use_length is set.
 */
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    // Let the data input take the (possibly already known) output dtype.
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    // Only the data input participates; the length input keeps its own type.
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}
// Forward shape inference: output matches data; when use_length=True the
// length input's shape is the data shape with the softmax axis removed.
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    mxnet::TShape& dshape = in_attrs->at(0);
    // Length shape = data shape minus the softmax axis (at least rank 1).
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    // NOTE(review): only axis == -1 is remapped to the last dim here; other
    // negative axes appear to pass through unchanged — confirm upstream validation.
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  // Output shape is elementwise-equal to the data input.
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
// Backward shape inference. Input layout depends on the mode:
//   use_length:      {ograd, data, length, output} -> {dgrad, lgrad}
//   dtype override:  {ograd, data, output}         -> {dgrad}
//   default:         {ograd, output}               -> {dgrad}
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // Infer ograd/data/output + dgrad together, skipping the length input (index 2).
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      // Length gradient mirrors the length input's shape.
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}
// Backward type inference. The data gradient takes the data input's dtype;
// the (optional) length gradient takes the length input's dtype.
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    // in[1] is the original data; the forward output sits at index 3 (use_length) or 2.
    int in_dtype = (*in_attrs)[1];
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
    // ograd must match the forward output's dtype; dgrad matches the data's dtype.
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);
    if (softmax_use_length(attrs)) {
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));
    }
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
           (!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1));
  } else {
    // Default mode: {ograd, output}; everything shares the output's dtype.
    CHECK_EQ(in_attrs->size(), 2U);
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}
// In-place options for the backward pass: pairs of (input index, output index)
// whose buffers may be shared. Layout follows SoftmaxGradOpInputNames.
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // {ograd,data,length,output} vs {dgrad,lgrad}; length may share with lgrad.
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
    } else {
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
    }
  } else {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
  }
}
// Number of backward inputs: 4 with a length mask, 3 with a dtype override,
// otherwise the plain {ograd, output} pair.
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  if (softmax_use_length(attrs)) {
    return 4;
  }
  return softmax_has_dtype_override(attrs) ? 3 : 2;
}
// Names of the backward inputs; mirrors SoftmaxGradOpNumInputs (4/3/2 entries).
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  if (softmax_use_length(attrs)) {
    return std::vector<std::string>{"ograd", "data", "length", "output"};
  }
  if (softmax_has_dtype_override(attrs)) {
    return std::vector<std::string>{"ograd", "data", "output"};
  }
  return std::vector<std::string>{"ograd", "output"};
}
// Gradient-node builder: picks the gradient helper based on which forward
// tensors the backward pass needs (inputs+outputs vs outputs only).
struct SoftmaxFGradient {
  const char *op_name;  // name of the backward op to instantiate
  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    } else {
      return ElemwiseGradUseOut {op_name}(n, ograds);
    }
  }
};
// Forward softmax kernel dispatcher.
//   OP:     reduction op of the softmax family; negate selects softmin.
// Dispatches on input dtype (with optional safe accumulation type), output
// dtype, and the mask's integer dtype, then calls the 2-D or 3-D Softmax kernel.
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp) return;
  // Softmax writes its output in full; accumulation into it is unsupported.
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  // Collapse dims around the softmax axis to at most 3-D (axis position updated in place).
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      // Mask dtype defaults to int32 when no mask is supplied.
      int type = kInt32;
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        // safe_acc selects the wider accumulation type AType; otherwise accumulate in DType.
        if (safe_acc) {
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        } else {
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}
// Backward softmax kernel dispatcher. Zeroes the length gradient (it is not
// differentiable), then dispatches the SoftmaxGrad kernel over dtype,
// request type, and mask dtype.
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    // The length input carries no gradient; fill lgrad with zeros when requested.
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
          ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp) return;
  const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  // Index of the forward output within inputs (see SoftmaxGradOpInputNames).
  int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType * length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          // safe_acc selects the wider accumulation type AType; otherwise DType.
          if (safe_acc) {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<2>(), axis,
                static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<3>(), axis,
                static_cast<DType>(temperature));
            }
          } else {
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<2>(), axis,
                static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<3>(), axis,
                static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}
} // namespace op
} // namespace mxnet
namespace std {
// Hash support so SoftmaxParam can key unordered containers (e.g. op caches).
// Combines all fields that operator== compares.
template<>
struct hash<mxnet::op::SoftmaxParam> {
  // Fix: std::hash's call operator must be const-qualified — unordered
  // containers invoke the hasher through a const reference, and the
  // original non-const operator() fails to compile in that context.
  size_t operator()(const mxnet::op::SoftmaxParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    ret = dmlc::HashCombine(ret, val.temperature);
    ret = dmlc::HashCombine(ret, val.dtype);
    ret = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
omp_crit.c | // note not doing O0 below as to ensure we get tbaa
// TODO: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out
// TODO: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// note not doing O0 below as to ensure we get tbaa
// TODO: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out
// TODO: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "test_utils.h"
double __enzyme_autodiff(void*, ...);
/* Sum of squares of a[0..N-1], computed in parallel: each thread
 * accumulates a double-precision partial sum over its share of the
 * iteration space, then folds it into the shared result under a
 * critical section. */
float omp(float* a, int N) {
  float res = 0.0;
  #pragma omp parallel
  {
    double partial = 0.0;   /* per-thread accumulator */
    #pragma omp for
    for (int j = 0; j < N; j++)
      partial += a[j] * a[j];
    /* serialize the reduction into the shared total */
    #pragma omp critical
    {
      res += partial;
    }
  }
  return res;
}
/* Driver: differentiates omp() via Enzyme and checks d(sum a_i^2)/da_i = 2*a_i. */
int main(int argc, char** argv) {
  int N = 20;
  float a[N];
  for(int i=0; i<N; i++) {
    a[i] = i+1;
  }
  /* shadow (gradient) buffer must start zeroed for Enzyme */
  float d_a[N];
  for(int i=0; i<N; i++)
    d_a[i] = 0.0f;
  //omp(*a, N);
  printf("ran omp\n");
  /* reverse-mode AD: a is active (paired with d_a), N is passed by value */
  __enzyme_autodiff((void*)omp, a, d_a, N);
  for(int i=0; i<N; i++) {
    printf("a[%d]=%f d_a[%d]=%f\n", i, a[i], i, d_a[i]);
  }
  /* APPROX_EQ comes from test_utils.h; tolerance is loose vs float precision */
  for(int i=0; i<N; i++) {
    APPROX_EQ(d_a[i], 2.0f*(i+1), 1e-10);
  }
  return 0;
}
|
polybench.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* polybench.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#ifdef __linux__
#include <sys/resource.h>
#elif _WIN32
// Do not include <sys/resource.h>
#else
#include <sys/resource.h>
#endif
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
#if defined(POLYBENCH_PAPI)
# undef POLYBENCH_PAPI
# include "polybench.h"
# define POLYBENCH_PAPI
#else
# include "polybench.h"
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] =
{
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/*
* Allocation table, to enable inter-array padding. All data allocated
* with polybench_alloc_data should be freed with polybench_free_data.
*
*/
#define NB_INITIAL_TABLE_ENTRIES 512
struct polybench_data_ptrs
{
void** user_view;
void** real_ptr;
int nb_entries;
int nb_avail_entries;
};
static struct polybench_data_ptrs* _polybench_alloc_table = NULL;
static size_t polybench_inter_array_padding_sz = 0;
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
/* Wall-clock time in seconds via gettimeofday; compiled to a constant 0
   unless one of the timing modes (POLYBENCH_TIME / POLYBENCH_GFLOPS)
   is enabled, so the timer costs nothing in non-timed builds. */
static
double rtclock()
{
#if defined(POLYBENCH_TIME) || defined(POLYBENCH_GFLOPS)
  struct timeval tv;
  int rc = gettimeofday (&tv, NULL);
  if (rc != 0)
    printf ("Error return from gettimeofday: %d", rc);
  return (tv.tv_sec + tv.tv_usec * 1.0e-6);
#else
  return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
/* Read the x86 time-stamp counter. RDTSC returns the 64-bit cycle count
   split across EDX:EAX; recombine into one 64-bit value.
   NOTE(review): x86-only and unserialized (no CPUID/LFENCE fence), so
   out-of-order execution can skew very short measurements. */
static
unsigned long long int rdtsc()
{
  unsigned long long int ret = 0;
  unsigned int cycles_lo;
  unsigned int cycles_hi;
  __asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
  ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
  return ret;
}
#endif
/* Evict benchmark data from the cache hierarchy by streaming through a
   zeroed buffer larger than the LLC (POLYBENCH_CACHE_SIZE_KB). The sum
   is asserted so the compiler cannot dead-code-eliminate the traversal. */
void polybench_flush_cache()
{
  int nelems = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
  double* scratch = (double*) calloc (nelems, sizeof(double));
  double acc = 0.0;
  int i;
#ifdef _OPENMP
  //#pragma omp parallel for reduction(+:acc) private(i)
#endif
  for (i = 0; i < nelems; i++)
    acc += scratch[i];
  /* buffer is zero-filled, so the sum must stay tiny */
  assert (acc <= 10.0);
  free (scratch);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
/* Switch the process to the real-time FIFO scheduler at maximum priority
   to minimize OS preemption during measurement. */
void polybench_linux_fifo_scheduler()
{
  /* Use FIFO scheduler to limit OS interference. Program must be run
     as root, and this works only for Linux kernels. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
  /* NOTE(review): return value of sched_setscheduler is ignored; failure
     (e.g. not root) silently leaves the normal scheduler in place. */
  sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
/* Undo polybench_linux_fifo_scheduler: return to the default policy. */
void polybench_linux_standard_scheduler()
{
  /* Restore to standard scheduler policy. */
  struct sched_param schedParam;
  schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
  sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
/* Report a failed PAPI call (file/line/call/return code), shut PAPI down,
   and abort the run. Never returns. */
static
void test_fail(char *file, int line, char *call, int retval)
{
  char buf[128];
  memset(buf, '\0', sizeof(buf));
  if (retval != 0)
    fprintf (stdout, "%-40s FAILED\nLine # %d\n", file, line);
  else
    {
      fprintf (stdout, "%-40s SKIPPED\n", file);
      fprintf (stdout, "Line # %d\n", line);
    }
  if (retval == PAPI_ESYS)
    {
      /* Fix: bound the write — 'call' is a caller-supplied string and the
         original sprintf could overflow the 128-byte buffer. snprintf
         truncates instead and always NUL-terminates. */
      snprintf (buf, sizeof(buf), "System error in %s", call);
      perror (buf);
    }
  else if (retval > 0)
    fprintf (stdout, "Error: %s\n", call);
  else if (retval == 0)
    fprintf (stdout, "Error: %s\n", call);
  else
    {
      char errstring[PAPI_MAX_STR_LEN];
      // PAPI 5.4.3 has changed the API for PAPI_perror.
#if defined (PAPI_VERSION) && ((PAPI_VERSION_MAJOR(PAPI_VERSION) == 5 && PAPI_VERSION_MINOR(PAPI_VERSION) >= 4) || PAPI_VERSION_MAJOR(PAPI_VERSION) > 5)
      fprintf (stdout, "Error in %s: %s\n", call, PAPI_strerror(retval));
#else
      PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
      fprintf (stdout, "Error in %s: %s\n", call, errstring);
#endif
    }
  fprintf (stdout, "\n");
  if (PAPI_is_initialized ())
    PAPI_shutdown ();
  exit (1);
}
/* Initialize the PAPI library and build the event set from
   _polybench_papi_eventlist. Under OpenMP only the designated monitor
   thread (polybench_papi_counters_threadid) performs the initialization. */
void polybench_papi_init()
{
# ifdef _OPENMP
  //#pragma omp parallel
  {
#pragma omp master
    {
      /* clamp the monitor thread id to the available thread count */
      if (omp_get_max_threads () < polybench_papi_counters_threadid)
        polybench_papi_counters_threadid = omp_get_max_threads () - 1;
    }
#pragma omp barrier
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        polybench_papi_eventset = PAPI_NULL;
        if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
          test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
        if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
        /* translate each configured event name to its PAPI code */
        int k;
        for (k = 0; _polybench_papi_eventlist[k]; ++k)
          {
            if ((retval =
                 PAPI_event_name_to_code (_polybench_papi_eventlist[k],
                                          &(polybench_papi_eventlist[k])))
                != PAPI_OK)
              test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
          }
        /* zero-terminate the code list (consumed by polybench_papi_print) */
        polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Tear down the PAPI event set and shut the library down; only the
   monitor thread acts under OpenMP. */
void polybench_papi_close()
{
# ifdef _OPENMP
  //#pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
        if (PAPI_is_initialized ())
          PAPI_shutdown ();
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Flush the cache, add event 'evid' to the event set, and start counting.
   One counter is measured per kernel run; always returns 0. */
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval = 1;
        char descr[PAPI_MAX_STR_LEN];
        PAPI_event_info_t evinfo;
        /* descr is filled for diagnostics; its result is not otherwise used */
        PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
        if (PAPI_add_event (polybench_papi_eventset,
                            polybench_papi_eventlist[evid]) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
        if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
        if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
  return 0;
}
/* Stop counting event 'evid', record its value into polybench_papi_values,
   and remove it from the event set so the next event can be added. */
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num () == polybench_papi_counters_threadid)
      {
# endif
        int retval;
        long_long values[1];
        values[0] = 0;
        if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
            != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_read", retval);
        if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
        polybench_papi_values[evid] = values[0];
        if ((retval = PAPI_remove_event
             (polybench_papi_eventset,
              polybench_papi_eventlist[evid])) != PAPI_OK)
          test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
/* Print the collected counter values, one per configured event, on one
   line; POLYBENCH_PAPI_VERBOSE prefixes each value with its event name. */
void polybench_papi_print()
{
  int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
  {
    if (omp_get_thread_num() == polybench_papi_counters_threadid)
      {
#ifdef POLYBENCH_PAPI_VERBOSE
        verbose = 1;
#endif
        if (verbose)
          printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
        int evid;
        for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
          {
            if (verbose)
              printf ("%s=", _polybench_papi_eventlist[evid]);
            printf ("%llu ", polybench_papi_values[evid]);
            if (verbose)
              printf ("\n");
          }
        printf ("\n");
# ifdef _OPENMP
      }
  }
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
/* Pre-measurement setup: flush the cache (unless disabled) and optionally
   switch to the FIFO real-time scheduler. Called by polybench_timer_start. */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
  polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_fifo_scheduler ();
#endif
}
/* Start the benchmark timer: wall clock by default, TSC cycle counter
   when POLYBENCH_CYCLE_ACCURATE_TIMER is defined. */
void polybench_timer_start()
{
  polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_start = rtclock ();
#else
  polybench_c_start = rdtsc ();
#endif
}
/* Stop the benchmark timer (counterpart of polybench_timer_start) and
   restore the standard scheduler if the FIFO scheduler was engaged. */
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  polybench_t_end = rtclock ();
#else
  polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
  polybench_linux_standard_scheduler ();
#endif
}
/* Print the measured result: GFLOPS when POLYBENCH_GFLOPS is set (falls
   back to elapsed seconds with a warning if flops were never registered),
   otherwise elapsed seconds or raw TSC cycles. */
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
  if (polybench_program_total_flops == 0)
    {
      printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
      printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
    }
  else
    printf ("%0.2lf\n",
            (polybench_program_total_flops /
             (double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
  printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
  /* Fix: "%Ld" is not a standard printf conversion (UB with most libcs);
     the cycle difference is unsigned long long, which requires "%llu". */
  printf ("%llu\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
/*
* These functions are used only if the user defines a specific
* inter-array padding. It grows a global structure,
* _polybench_alloc_table, which keeps track of the data allocated via
* polybench_alloc_data (on which inter-array padding is applied), so
* that the original, non-shifted pointer can be recovered when
* calling polybench_free_data.
*
*/
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
/* Grow the inter-array-padding bookkeeping table by one chunk of
   NB_INITIAL_TABLE_ENTRIES slots. Must only be called when the table
   exists, is exactly full, and has no spare slots. */
static
void grow_alloc_table()
{
  if (_polybench_alloc_table == NULL ||
      (_polybench_alloc_table->nb_entries % NB_INITIAL_TABLE_ENTRIES) != 0 ||
      _polybench_alloc_table->nb_avail_entries != 0)
    {
      /* Should never happen if the API is properly used. */
      fprintf (stderr, "[ERROR] Inter-array padding requires to use polybench_alloc_data and polybench_free_data\n");
      exit (1);
    }
  size_t sz = _polybench_alloc_table->nb_entries;
  sz += NB_INITIAL_TABLE_ENTRIES;
  /* grow both parallel arrays; realloc failure is caught by the asserts */
  _polybench_alloc_table->user_view =
    realloc (_polybench_alloc_table->user_view, sz * sizeof(void*));
  assert(_polybench_alloc_table->user_view != NULL);
  _polybench_alloc_table->real_ptr =
    realloc (_polybench_alloc_table->real_ptr, sz * sizeof(void*));
  assert(_polybench_alloc_table->real_ptr != NULL);
  _polybench_alloc_table->nb_avail_entries = NB_INITIAL_TABLE_ENTRIES;
}
/* Record a freshly allocated pointer in the padding table and return the
   shifted address handed to the user: the real allocation start plus the
   padding offset (padded_sz - orig_sz). polybench_free_data later maps
   the user view back to the real pointer. */
static
void* register_padded_pointer(void* ptr, size_t orig_sz, size_t padded_sz)
{
  if (_polybench_alloc_table == NULL)
    {
      fprintf (stderr, "[ERROR] Inter-array padding requires to use polybench_alloc_data and polybench_free_data\n");
      exit (1);
    }
  if (_polybench_alloc_table->nb_avail_entries == 0)
    grow_alloc_table ();
  int id = _polybench_alloc_table->nb_entries++;
  _polybench_alloc_table->real_ptr[id] = ptr;
  /* Fix: arithmetic on void* is a GCC extension and invalid in ISO C;
     cast to char* so the byte offset is well-defined and portable. */
  _polybench_alloc_table->user_view[id] = (char*) ptr + (padded_sz - orig_sz);
  return _polybench_alloc_table->user_view[id];
}
/* Free an allocation registered via register_padded_pointer. Accepts either
   the user-visible (shifted) pointer or the real pointer, frees the real
   allocation, compacts the table, and releases the table itself once empty.
   Unknown pointers are silently ignored. */
static
void
free_data_from_alloc_table (void* ptr)
{
  if (_polybench_alloc_table != NULL && _polybench_alloc_table->nb_entries > 0)
    {
      /* linear search over both views of each registered allocation */
      int i;
      for (i = 0; i < _polybench_alloc_table->nb_entries; ++i)
        if (_polybench_alloc_table->user_view[i] == ptr ||
            _polybench_alloc_table->real_ptr[i] == ptr)
          break;
      if (i != _polybench_alloc_table->nb_entries)
        {
          free (_polybench_alloc_table->real_ptr[i]);
          /* shift the tail down to keep entries contiguous */
          for (; i < _polybench_alloc_table->nb_entries - 1; ++i)
            {
              _polybench_alloc_table->user_view[i] =
                _polybench_alloc_table->user_view[i + 1];
              _polybench_alloc_table->real_ptr[i] =
                _polybench_alloc_table->real_ptr[i + 1];
            }
          _polybench_alloc_table->nb_entries--;
          _polybench_alloc_table->nb_avail_entries++;
          /* last entry gone: dispose of the bookkeeping table entirely */
          if (_polybench_alloc_table->nb_entries == 0)
            {
              free (_polybench_alloc_table->user_view);
              free (_polybench_alloc_table->real_ptr);
              free (_polybench_alloc_table);
              _polybench_alloc_table = NULL;
            }
        }
    }
}
/* Lazily create the padding bookkeeping table with an initial capacity of
   NB_INITIAL_TABLE_ENTRIES; no-op if it already exists. */
static
void check_alloc_table_state()
{
  if (_polybench_alloc_table == NULL)
    {
      _polybench_alloc_table = (struct polybench_data_ptrs*)
        malloc (sizeof(struct polybench_data_ptrs));
      assert(_polybench_alloc_table != NULL);
      _polybench_alloc_table->user_view =
        (void**) malloc (sizeof(void*) * NB_INITIAL_TABLE_ENTRIES);
      assert(_polybench_alloc_table->user_view != NULL);
      _polybench_alloc_table->real_ptr =
        (void**) malloc (sizeof(void*) * NB_INITIAL_TABLE_ENTRIES);
      assert(_polybench_alloc_table->real_ptr != NULL);
      _polybench_alloc_table->nb_entries = 0;
      _polybench_alloc_table->nb_avail_entries = NB_INITIAL_TABLE_ENTRIES;
    }
}
#endif // !POLYBENCH_ENABLE_INTARRAY_PAD
/* Page-aligned allocator used by polybench_alloc_data. Adds a growing
   inter-array padding (POLYBENCH_INTER_ARRAY_PADDING_FACTOR per call,
   from polybench.h) and aborts on allocation failure. */
static
void*
xmalloc(size_t alloc_sz)
{
  void* ret = NULL;
  /* By default, post-pad the arrays. Safe behavior, but likely useless. */
  polybench_inter_array_padding_sz += POLYBENCH_INTER_ARRAY_PADDING_FACTOR;
  size_t padded_sz = alloc_sz + polybench_inter_array_padding_sz;
  /* 4096-byte alignment: start each array on its own page */
  int err = posix_memalign (&ret, 4096, padded_sz);
  if (! ret || err)
    {
      fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
      exit (1);
    }
  /* Safeguard: this is invoked only if polybench.c has been compiled
     with inter-array padding support from polybench.h. If so, move
     the starting address of the allocation and return it to the
     user. The original pointer is registered in an allocation table
     internal to polybench.c. Data must then be freed using
     polybench_free_data, which will inspect the allocation table to
     free the original pointer.*/
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
  /* This moves the 'ret' pointer by (padded_sz - alloc_sz) positions, and
     registers it in the lookup table for future free using
     polybench_free_data. */
  ret = register_padded_pointer(ret, alloc_sz, padded_sz);
#endif
  return ret;
}
/* Free memory obtained from polybench_alloc_data. With inter-array padding
   enabled the real (unshifted) pointer is looked up in the allocation
   table; otherwise this is a plain free. */
void polybench_free_data(void* ptr)
{
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
  free_data_from_alloc_table (ptr);
#else
  free (ptr);
#endif
}
/* Allocate n elements of elt_size bytes (page-aligned, optionally padded).
   Must be released with polybench_free_data. Aborts on overflow or OOM. */
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
#ifdef POLYBENCH_ENABLE_INTARRAY_PAD
  check_alloc_table_state ();
#endif
  /* Fix (was "FIXME: detect overflow!"): reject requests whose byte count
     does not fit in size_t instead of silently wrapping and allocating a
     too-small buffer. */
  size_t val = n;
  if (elt_size > 0 && val > SIZE_MAX / (size_t) elt_size)
    {
      fprintf (stderr, "[PolyBench] polybench_alloc_data: allocation size overflows size_t\n");
      exit (1);
    }
  val *= elt_size;
  void* ret = xmalloc (val);
  return ret;
}
|
omp_mpi_example.c | #include <omp.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
/* Hybrid MPI+OpenMP hello-world: each MPI rank reports its host, then
   every OpenMP thread within the rank prints its thread id. */
int
main(int argc, char *argv[])
{
  int numprocs, rank, namelen, id;
  char processor_name[MPI_MAX_PROCESSOR_NAME];
  MPI_Init(&argc,&argv);
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Get_processor_name(processor_name,&namelen);
  printf("MPI: Process %d on %s out of %d\n",rank,processor_name,numprocs);
  /* id is private so each thread prints its own number */
  #pragma omp parallel private(id)
  {
    id = omp_get_thread_num();
    printf("OMP: (%s) Thread %d\n",processor_name,id);
  }
  MPI_Finalize();
  return(0);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.